repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
maxsocl/django | django/dispatch/dispatcher.py | 37 | 12239 | import sys
import threading
import warnings
import weakref
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.six.moves import range
if sys.version_info < (3, 4):
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
    """
    Base class for all signals.

    Internal attributes:

        receivers
            [((receiverkey (id), senderkey (id)), weakref(receiver)), ...]
    """

    def __init__(self, providing_args=None, use_caching=False):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        # Protects self.receivers against concurrent connect/disconnect.
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Flag set (from GC callbacks) when a weakly-referenced receiver died;
        # actual list cleanup is deferred until we next hold self.lock.
        self._dead_receivers = False

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak referenceable.

                Receivers must be able to accept keyword arguments.

                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                of type Signal, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            import inspect
            assert callable(receiver), "Signal receivers must be callable."

            # Check for **kwargs
            # Not all callables are inspectable with getargspec, so we'll
            # try a couple different ways but in the end fall back on assuming
            # it is -- we don't want to prevent registration of valid but weird
            # callables.
            try:
                argspec = inspect.getargspec(receiver)
            except TypeError:
                try:
                    argspec = inspect.getargspec(receiver.__call__)
                except (TypeError, AttributeError):
                    argspec = None
            if argspec:
                # argspec[2] is the **kwargs catch-all parameter name (or None).
                assert argspec[2] is not None, \
                    "Signal receivers must accept keyword arguments (**kwargs)."

        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods: a plain weakref to a bound method would
            # die immediately, so use WeakMethod keyed on the instance.
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                ref = WeakMethod
                receiver_object = receiver.__self__
            if sys.version_info >= (3, 4):
                receiver = ref(receiver)
                weakref.finalize(receiver_object, self._remove_receiver)
            else:
                receiver = ref(receiver, self._remove_receiver)

        with self.lock:
            self._clear_dead_receivers()
            # Only append if an identical lookup_key isn't already registered.
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()

    def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect

        Returns True if a receiver was disconnected, False otherwise.
        """
        if weak is not None:
            warnings.warn("Passing `weak` to disconnect has no effect.",
                          RemovedInDjango21Warning, stacklevel=2)
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            for index in range(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()
        return disconnected

    def has_listeners(self, sender=None):
        # True if at least one live receiver would be called for this sender.
        return bool(self._live_receivers(sender))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a raises an error.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses

        for receiver in self._live_receivers(sender):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ].

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver. The traceback is always attached to the error at
        ``__traceback__``.
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                # Python 2 has no __traceback__; attach it manually there.
                if not hasattr(err, '__traceback__'):
                    err.__traceback__ = sys.exc_info()[2]
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            new_receivers = []
            for r in self.receivers:
                # Drop entries whose weakref has gone dead.
                if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
                    continue
                new_receivers.append(r)
            self.receivers = new_receivers

    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this case in
            # .send() prior to calling _live_receivers() due to concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    # NONE_ID entries listen to every sender.
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers

    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Normalize the single-signal case so one loop handles both forms.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
| bsd-3-clause |
Celedhrim/persomov | libs/tornado/template.py | 142 | 31156 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from tornado import escape
from tornado.log import app_log
from tornado.util import ObjectDict, exec_in, unicode_type
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
    """A compiled template.

    We compile into Python from the given template_string. You can generate
    the template from variables with generate().
    """
    # note that the constructor's signature is not extracted with
    # autodoc because _UNSET looks like garbage. When changing
    # this signature update website/sphinx/template.rst too.
    def __init__(self, template_string, name="<string>", loader=None,
                 compress_whitespace=None, autoescape=_UNSET):
        self.name = name
        # Whitespace compression defaults on for .html/.js output only.
        if compress_whitespace is None:
            compress_whitespace = name.endswith(".html") or \
                name.endswith(".js")
        # Autoescape resolution: explicit argument > loader setting > default.
        if autoescape is not _UNSET:
            self.autoescape = autoescape
        elif loader:
            self.autoescape = loader.autoescape
        else:
            self.autoescape = _DEFAULT_AUTOESCAPE
        self.namespace = loader.namespace if loader else {}
        # Parse the template into an AST rooted at a _File node, then
        # generate and compile the Python source for it.
        reader = _TemplateReader(name, escape.native_str(template_string))
        self.file = _File(self, _parse(reader, self))
        self.code = self._generate_python(loader, compress_whitespace)
        self.loader = loader
        try:
            # Under python2.5, the fake filename used here must match
            # the module name used in __name__ below.
            # The dont_inherit flag prevents template.py's future imports
            # from being applied to the generated code.
            self.compiled = compile(
                escape.to_unicode(self.code),
                "%s.generated.py" % self.name.replace('.', '_'),
                "exec", dont_inherit=True)
        except Exception:
            # Log the generated source with line numbers to aid debugging
            # syntax errors in the template.
            formatted_code = _format_code(self.code).rstrip()
            app_log.error("%s code:\n%s", self.name, formatted_code)
            raise

    def generate(self, **kwargs):
        """Generate this template with the given arguments."""
        # Default helper functions available inside every template; user
        # namespace entries and call-time kwargs override them in that order.
        namespace = {
            "escape": escape.xhtml_escape,
            "xhtml_escape": escape.xhtml_escape,
            "url_escape": escape.url_escape,
            "json_encode": escape.json_encode,
            "squeeze": escape.squeeze,
            "linkify": escape.linkify,
            "datetime": datetime,
            "_tt_utf8": escape.utf8,  # for internal use
            "_tt_string_types": (unicode_type, bytes),
            # __name__ and __loader__ allow the traceback mechanism to find
            # the generated source code.
            "__name__": self.name.replace('.', '_'),
            "__loader__": ObjectDict(get_source=lambda name: self.code),
        }
        namespace.update(self.namespace)
        namespace.update(kwargs)
        exec_in(self.compiled, namespace)
        execute = namespace["_tt_execute"]
        # Clear the traceback module's cache of source data now that
        # we've generated a new template (mainly for this module's
        # unittests, where different tests reuse the same name).
        linecache.clearcache()
        return execute()

    def _generate_python(self, loader, compress_whitespace):
        # Render the AST (including inherited ancestors) into Python source.
        buffer = StringIO()
        try:
            # named_blocks maps from names to _NamedBlock objects
            named_blocks = {}
            ancestors = self._get_ancestors(loader)
            # Reverse so the root-most ancestor comes first; later (more
            # derived) templates overwrite earlier entries in named_blocks.
            ancestors.reverse()
            for ancestor in ancestors:
                ancestor.find_named_blocks(loader, named_blocks)
            writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template,
                                 compress_whitespace)
            ancestors[0].generate(writer)
            return buffer.getvalue()
        finally:
            buffer.close()

    def _get_ancestors(self, loader):
        # Return [self.file] followed by the _File nodes of every template
        # reached through {% extends %} directives.
        ancestors = [self.file]
        for chunk in self.file.body.chunks:
            if isinstance(chunk, _ExtendsBlock):
                if not loader:
                    raise ParseError("{% extends %} block found, but no "
                                     "template loader")
                template = loader.load(chunk.name, self.name)
                ancestors.extend(template._get_ancestors(loader))
        return ancestors
class BaseLoader(object):
    """Base class for template loaders.

    You must use a template loader to use template constructs like
    ``{% extends %}`` and ``{% include %}``. The loader caches all
    templates after they are loaded the first time.
    """
    def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
        """``autoescape`` must be either None or a string naming a function
        in the template namespace, such as "xhtml_escape".
        """
        self.autoescape = autoescape
        self.namespace = namespace or {}
        self.templates = {}
        # self.lock protects self.templates. It's a reentrant lock
        # because templates may load other templates via `include` or
        # `extends`. Note that thanks to the GIL this code would be safe
        # even without the lock, but could lead to wasted work as multiple
        # threads tried to compile the same template simultaneously.
        self.lock = threading.RLock()

    def reset(self):
        """Resets the cache of compiled templates."""
        with self.lock:
            self.templates = {}

    def resolve_path(self, name, parent_path=None):
        """Converts a possibly-relative path to absolute (used internally)."""
        raise NotImplementedError()

    def load(self, name, parent_path=None):
        """Loads a template (from the cache when possible)."""
        name = self.resolve_path(name, parent_path=parent_path)
        with self.lock:
            cached = self.templates.get(name)
            if cached is None:
                cached = self.templates[name] = self._create_template(name)
            return cached

    def _create_template(self, name):
        """Build a Template for *name*; subclasses must override."""
        raise NotImplementedError()
class Loader(BaseLoader):
    """A template loader that loads from a single root directory.
    """
    def __init__(self, root_directory, **kwargs):
        super(Loader, self).__init__(**kwargs)
        self.root = os.path.abspath(root_directory)

    def resolve_path(self, name, parent_path=None):
        # A path is resolved relative to its parent template unless either
        # side is absolute or the parent is a synthetic "<...>" name.
        is_relative = (parent_path and
                       not parent_path.startswith("<") and
                       not parent_path.startswith("/") and
                       not name.startswith("/"))
        if is_relative:
            current_path = os.path.join(self.root, parent_path)
            file_dir = os.path.dirname(os.path.abspath(current_path))
            relative_path = os.path.abspath(os.path.join(file_dir, name))
            # Only rewrite the name if the result stays inside the root.
            if relative_path.startswith(self.root):
                name = relative_path[len(self.root) + 1:]
        return name

    def _create_template(self, name):
        path = os.path.join(self.root, name)
        with open(path, "rb") as f:
            return Template(f.read(), name=name, loader=self)
class DictLoader(BaseLoader):
    """A template loader that loads from a dictionary."""
    def __init__(self, dict, **kwargs):
        super(DictLoader, self).__init__(**kwargs)
        self.dict = dict

    def resolve_path(self, name, parent_path=None):
        # Resolve relative names against the parent template's directory,
        # mirroring Loader.resolve_path but with pure posix-path arithmetic.
        is_relative = (parent_path and
                       not parent_path.startswith("<") and
                       not parent_path.startswith("/") and
                       not name.startswith("/"))
        if is_relative:
            base_dir = posixpath.dirname(parent_path)
            name = posixpath.normpath(posixpath.join(base_dir, name))
        return name

    def _create_template(self, name):
        return Template(self.dict[name], name=name, loader=self)
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
    """Root node of a parsed template; wraps the body in ``_tt_execute()``."""
    def __init__(self, template, body):
        self.template = template
        self.body = body
        self.line = 0

    def generate(self, writer):
        # Emit the top-level generated function: render the body into a
        # list of utf-8 chunks and return their concatenation.
        writer.write_line("def _tt_execute():", self.line)
        with writer.indent():
            writer.write_line("_tt_buffer = []", self.line)
            writer.write_line("_tt_append = _tt_buffer.append", self.line)
            self.body.generate(writer)
            writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)

    def each_child(self):
        return (self.body,)
class _ChunkList(_Node):
    """A sequence of sibling nodes rendered one after another."""

    def __init__(self, chunks):
        self.chunks = chunks

    def generate(self, writer):
        for node in self.chunks:
            node.generate(writer)

    def each_child(self):
        return self.chunks
class _NamedBlock(_Node):
    """Node for ``{% block name %}``; overridable via template inheritance."""
    def __init__(self, name, body, template, line):
        self.name = name
        self.body = body
        self.template = template
        self.line = line

    def each_child(self):
        return (self.body,)

    def generate(self, writer):
        # Generate the most-derived definition registered under this name,
        # in the context of the template that defined it.
        block = writer.named_blocks[self.name]
        with writer.include(block.template, self.line):
            block.body.generate(writer)

    def find_named_blocks(self, loader, named_blocks):
        # Later (more derived) templates overwrite earlier registrations.
        named_blocks[self.name] = self
        _Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
    """Marker node for an ``{% extends "file" %}`` directive."""

    def __init__(self, name):
        self.name = name
class _IncludeBlock(_Node):
    """Node for ``{% include "file" %}``: inlines another template's body."""

    def __init__(self, name, reader, line):
        self.name = name
        self.template_name = reader.name
        self.line = line

    def find_named_blocks(self, loader, named_blocks):
        other = loader.load(self.name, self.template_name)
        other.file.find_named_blocks(loader, named_blocks)

    def generate(self, writer):
        other = writer.loader.load(self.name, self.template_name)
        with writer.include(other, self.line):
            other.file.body.generate(writer)
class _ApplyBlock(_Node):
    """Node for ``{% apply fn %}...{% end %}``: pipes block output through fn."""
    def __init__(self, method, line, body=None):
        self.method = method
        self.line = line
        self.body = body

    def each_child(self):
        return (self.body,)

    def generate(self, writer):
        # The body is compiled into a uniquely-named nested function; its
        # return value is passed through self.method before being appended.
        method_name = "_tt_apply%d" % writer.apply_counter
        writer.apply_counter += 1
        writer.write_line("def %s():" % method_name, self.line)
        with writer.indent():
            writer.write_line("_tt_buffer = []", self.line)
            writer.write_line("_tt_append = _tt_buffer.append", self.line)
            self.body.generate(writer)
            writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
        writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
            self.method, method_name), self.line)
class _ControlBlock(_Node):
    """Node for ``{% if/for/while/try ... %}...{% end %}`` constructs."""
    def __init__(self, statement, line, body=None):
        self.statement = statement
        self.line = line
        self.body = body

    def each_child(self):
        return (self.body,)

    def generate(self, writer):
        writer.write_line("%s:" % self.statement, self.line)
        with writer.indent():
            self.body.generate(writer)
            # Just in case the body was empty
            writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
    """Node for ``{% else %}``/``{% elif %}``/``{% except %}``/``{% finally %}``."""
    def __init__(self, statement, line):
        self.statement = statement
        self.line = line

    def generate(self, writer):
        # In case the previous block was empty
        writer.write_line("pass", self.line)
        # Emit one level shallower so the clause lines up with its opener.
        writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
    """A raw Python statement emitted verbatim into the generated code."""

    def __init__(self, statement, line):
        self.statement = statement
        self.line = line

    def generate(self, writer):
        writer.write_line(self.statement, self.line)
class _Expression(_Node):
    """Node for ``{{ expr }}``: evaluate, utf-8 encode, optionally escape."""
    def __init__(self, expression, line, raw=False):
        self.expression = expression
        self.line = line
        # raw=True (``{% raw %}`` / ``{% module %}``) skips autoescaping.
        self.raw = raw

    def generate(self, writer):
        writer.write_line("_tt_tmp = %s" % self.expression, self.line)
        # Strings are encoded directly; everything else goes through str().
        writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
                          " _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
        writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
        if not self.raw and writer.current_template.autoescape is not None:
            # In python3 functions like xhtml_escape return unicode,
            # so we have to convert to utf8 again.
            writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
                              writer.current_template.autoescape, self.line)
        writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
    """Node for ``{% module expr %}``; UIModule output is never autoescaped."""

    def __init__(self, expression, line):
        super(_Module, self).__init__("_tt_modules." + expression, line,
                                      raw=True)
class _Text(_Node):
    """Node for literal text appearing between template directives."""
    def __init__(self, value, line):
        self.value = value
        self.line = line

    def generate(self, writer):
        value = self.value

        # Compress lots of white space to a single character. If the whitespace
        # breaks a line, have it continue to break a line, but just with a
        # single \n character.  Content containing <pre> is left untouched.
        if writer.compress_whitespace and "<pre>" not in value:
            value = re.sub(r"([\t ]+)", " ", value)
            value = re.sub(r"(\s*\n\s*)", "\n", value)

        if value:
            writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
    """Raised when a template contains invalid directive syntax."""
class _CodeWriter(object):
    """Writes indented Python source for the compiled template.

    Tracks the current indent depth, the template currently being generated
    (for per-line provenance comments) and the stack of active
    ``{% include %}``s.
    """
    def __init__(self, file, named_blocks, loader, current_template,
                 compress_whitespace):
        self.file = file
        self.named_blocks = named_blocks
        self.loader = loader
        self.current_template = current_template
        self.compress_whitespace = compress_whitespace
        self.apply_counter = 0
        self.include_stack = []
        self._indent = 0

    def indent_size(self):
        # Current indentation depth, in levels (each level is four spaces).
        return self._indent

    def indent(self):
        # Context manager that bumps the indent level inside a ``with``.
        # The nested class names its instance parameter ``_`` on purpose so
        # that ``self`` keeps referring to the enclosing _CodeWriter.
        class Indenter(object):
            def __enter__(_):
                self._indent += 1
                return self

            def __exit__(_, *args):
                assert self._indent > 0
                self._indent -= 1

        return Indenter()

    def include(self, template, line):
        # Switch to *template* for the duration of the returned context
        # manager (used while generating {% include %}d templates).
        self.include_stack.append((self.current_template, line))
        self.current_template = template

        class IncludeTemplate(object):
            def __enter__(_):
                return self

            def __exit__(_, *args):
                self.current_template = self.include_stack.pop()[0]

        return IncludeTemplate()

    def write_line(self, line, line_number, indent=None):
        if indent is None:
            indent = self._indent
        # Tag every generated line with its template origin so tracebacks
        # through the generated module can be mapped back to the template.
        line_comment = '  # %s:%d' % (self.current_template.name, line_number)
        if self.include_stack:
            ancestors = ["%s:%d" % (tmpl.name, lineno)
                         for (tmpl, lineno) in self.include_stack]
            line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
        print("    " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
    """Recursively parse template source from *reader* into a _ChunkList.

    in_block is the name of the enclosing block operator (e.g. "if", "for")
    or None at top level; in_loop is the name of the innermost enclosing
    loop operator, used to validate {% break %} / {% continue %}.
    Returns when EOF (top level) or a matching {% end %} is reached.
    """
    body = _ChunkList([])
    while True:
        # Find next template directive
        curly = 0
        while True:
            curly = reader.find("{", curly)
            if curly == -1 or curly + 1 == reader.remaining():
                # EOF
                if in_block:
                    raise ParseError("Missing {%% end %%} block for %s" %
                                     in_block)
                body.chunks.append(_Text(reader.consume(), reader.line))
                return body
            # If the first curly brace is not the start of a special token,
            # start searching from the character after it
            if reader[curly + 1] not in ("{", "%", "#"):
                curly += 1
                continue
            # When there are more than 2 curlies in a row, use the
            # innermost ones. This is useful when generating languages
            # like latex where curlies are also meaningful
            if (curly + 2 < reader.remaining() and
                    reader[curly + 1] == '{' and reader[curly + 2] == '{'):
                curly += 1
                continue
            break

        # Append any text before the special token
        if curly > 0:
            cons = reader.consume(curly)
            body.chunks.append(_Text(cons, reader.line))

        start_brace = reader.consume(2)
        line = reader.line

        # Template directives may be escaped as "{{!" or "{%!".
        # In this case output the braces and consume the "!".
        # This is especially useful in conjunction with jquery templates,
        # which also use double braces.
        if reader.remaining() and reader[0] == "!":
            reader.consume(1)
            body.chunks.append(_Text(start_brace, line))
            continue

        # Comment
        if start_brace == "{#":
            end = reader.find("#}")
            if end == -1:
                raise ParseError("Missing end expression #} on line %d" % line)
            contents = reader.consume(end).strip()
            reader.consume(2)
            continue

        # Expression
        if start_brace == "{{":
            end = reader.find("}}")
            if end == -1:
                raise ParseError("Missing end expression }} on line %d" % line)
            contents = reader.consume(end).strip()
            reader.consume(2)
            if not contents:
                raise ParseError("Empty expression on line %d" % line)
            body.chunks.append(_Expression(contents, line))
            continue

        # Block
        assert start_brace == "{%", start_brace
        end = reader.find("%}")
        if end == -1:
            raise ParseError("Missing end block %%} on line %d" % line)
        contents = reader.consume(end).strip()
        reader.consume(2)
        if not contents:
            raise ParseError("Empty block tag ({%% %%}) on line %d" % line)

        # Split "operator rest-of-directive".
        operator, space, suffix = contents.partition(" ")
        suffix = suffix.strip()

        # Intermediate ("else", "elif", etc) blocks
        intermediate_blocks = {
            "else": set(["if", "for", "while", "try"]),
            "elif": set(["if"]),
            "except": set(["try"]),
            "finally": set(["try"]),
        }
        allowed_parents = intermediate_blocks.get(operator)
        if allowed_parents is not None:
            # Intermediate clauses are only legal directly inside a
            # compatible enclosing block.
            if not in_block:
                raise ParseError("%s outside %s block" %
                                 (operator, allowed_parents))
            if in_block not in allowed_parents:
                raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
            body.chunks.append(_IntermediateControlBlock(contents, line))
            continue

        # End tag
        elif operator == "end":
            if not in_block:
                raise ParseError("Extra {%% end %%} block on line %d" % line)
            return body

        elif operator in ("extends", "include", "set", "import", "from",
                          "comment", "autoescape", "raw", "module"):
            # Directives without a body (no matching {% end %}).
            if operator == "comment":
                continue
            if operator == "extends":
                suffix = suffix.strip('"').strip("'")
                if not suffix:
                    raise ParseError("extends missing file path on line %d" % line)
                block = _ExtendsBlock(suffix)
            elif operator in ("import", "from"):
                if not suffix:
                    raise ParseError("import missing statement on line %d" % line)
                block = _Statement(contents, line)
            elif operator == "include":
                suffix = suffix.strip('"').strip("'")
                if not suffix:
                    raise ParseError("include missing file path on line %d" % line)
                block = _IncludeBlock(suffix, reader, line)
            elif operator == "set":
                if not suffix:
                    raise ParseError("set missing statement on line %d" % line)
                block = _Statement(suffix, line)
            elif operator == "autoescape":
                # Takes effect immediately for the rest of this file; it
                # mutates the template rather than adding a node.
                fn = suffix.strip()
                if fn == "None":
                    fn = None
                template.autoescape = fn
                continue
            elif operator == "raw":
                block = _Expression(suffix, line, raw=True)
            elif operator == "module":
                block = _Module(suffix, line)
            body.chunks.append(block)
            continue

        elif operator in ("apply", "block", "try", "if", "for", "while"):
            # parse inner body recursively
            if operator in ("for", "while"):
                block_body = _parse(reader, template, operator, operator)
            elif operator == "apply":
                # apply creates a nested function so syntactically it's not
                # in the loop.
                block_body = _parse(reader, template, operator, None)
            else:
                block_body = _parse(reader, template, operator, in_loop)

            if operator == "apply":
                if not suffix:
                    raise ParseError("apply missing method name on line %d" % line)
                block = _ApplyBlock(suffix, line, block_body)
            elif operator == "block":
                if not suffix:
                    raise ParseError("block missing name on line %d" % line)
                block = _NamedBlock(suffix, block_body, template, line)
            else:
                block = _ControlBlock(contents, line, block_body)
            body.chunks.append(block)
            continue

        elif operator in ("break", "continue"):
            if not in_loop:
                raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
            body.chunks.append(_Statement(contents, line))
            continue

        else:
            raise ParseError("unknown operator: %r" % operator)
| gpl-3.0 |
p0psicles/SickRage | lib/github/Repository.py | 21 | 92750 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Christopher Gilbert <christopher.john.gilbert@gmail.com> #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Adrian Petrescu <adrian.petrescu@maluuba.com> #
# Copyright 2013 Mark Roddy <markroddy@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2015 Jannis Gebauer <ja.geb@me.com> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import urllib
import datetime
import github.GithubObject
import github.PaginatedList
import github.Branch
import github.IssueEvent
import github.ContentFile
import github.Label
import github.GitBlob
import github.Organization
import github.GitRef
import github.GitRelease
import github.Issue
import github.Repository
import github.PullRequest
import github.RepositoryKey
import github.NamedUser
import github.Milestone
import github.Comparison
import github.CommitComment
import github.GitCommit
import github.Team
import github.Commit
import github.GitTree
import github.Hook
import github.Tag
import github.GitTag
import github.Download
import github.Permissions
import github.Event
import github.Legacy
import github.StatsContributor
import github.StatsCommitActivity
import github.StatsCodeFrequency
import github.StatsParticipation
import github.StatsPunchCard
import github.Stargazer
class Repository(github.GithubObject.CompletableGithubObject):
"""
This class represents Repositorys. The reference can be found here http://developer.github.com/v3/repos/
"""
@property
def archive_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._archive_url)
return self._archive_url.value
@property
def assignees_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._assignees_url)
return self._assignees_url.value
@property
def blobs_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._blobs_url)
return self._blobs_url.value
@property
def branches_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._branches_url)
return self._branches_url.value
@property
def clone_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._clone_url)
return self._clone_url.value
@property
def collaborators_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._collaborators_url)
return self._collaborators_url.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def compare_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._compare_url)
return self._compare_url.value
@property
def contents_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._contents_url)
return self._contents_url.value
@property
def contributors_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._contributors_url)
return self._contributors_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def default_branch(self):
"""
:type: string
"""
self._completeIfNotSet(self._default_branch)
return self._default_branch.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def downloads_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._downloads_url)
return self._downloads_url.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def fork(self):
"""
:type: bool
"""
self._completeIfNotSet(self._fork)
return self._fork.value
@property
def forks(self):
"""
:type: integer
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._forks_count)
return self._forks_count.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def full_name(self):
"""
:type: string
"""
self._completeIfNotSet(self._full_name)
return self._full_name.value
@property
def git_commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_commits_url)
return self._git_commits_url.value
@property
def git_refs_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_refs_url)
return self._git_refs_url.value
@property
def git_tags_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_tags_url)
return self._git_tags_url.value
@property
def git_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_url)
return self._git_url.value
@property
def has_downloads(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_downloads)
return self._has_downloads.value
@property
def has_issues(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_issues)
return self._has_issues.value
@property
def has_wiki(self):
"""
:type: bool
"""
self._completeIfNotSet(self._has_wiki)
return self._has_wiki.value
@property
def homepage(self):
"""
:type: string
"""
self._completeIfNotSet(self._homepage)
return self._homepage.value
@property
def hooks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._hooks_url)
return self._hooks_url.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue_comment_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_comment_url)
return self._issue_comment_url.value
@property
def issue_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_events_url)
return self._issue_events_url.value
@property
def issues_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issues_url)
return self._issues_url.value
@property
def keys_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._keys_url)
return self._keys_url.value
@property
def labels_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._labels_url)
return self._labels_url.value
@property
def language(self):
"""
:type: string
"""
self._completeIfNotSet(self._language)
return self._language.value
@property
def languages_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._languages_url)
return self._languages_url.value
@property
def master_branch(self):
"""
:type: string
"""
self._completeIfNotSet(self._master_branch)
return self._master_branch.value
@property
def merges_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._merges_url)
return self._merges_url.value
@property
def milestones_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._milestones_url)
return self._milestones_url.value
@property
def mirror_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._mirror_url)
return self._mirror_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def network_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._network_count)
return self._network_count.value
@property
def notifications_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._notifications_url)
return self._notifications_url.value
@property
def open_issues(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues)
return self._open_issues.value
@property
def open_issues_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._open_issues_count)
return self._open_issues_count.value
@property
def organization(self):
"""
:type: :class:`github.Organization.Organization`
"""
self._completeIfNotSet(self._organization)
return self._organization.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def parent(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._parent)
return self._parent.value
@property
def permissions(self):
"""
:type: :class:`github.Permissions.Permissions`
"""
self._completeIfNotSet(self._permissions)
return self._permissions.value
@property
def private(self):
"""
:type: bool
"""
self._completeIfNotSet(self._private)
return self._private.value
@property
def pulls_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._pulls_url)
return self._pulls_url.value
@property
def pushed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._pushed_at)
return self._pushed_at.value
@property
def size(self):
"""
:type: integer
"""
self._completeIfNotSet(self._size)
return self._size.value
@property
def source(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._source)
return self._source.value
@property
def ssh_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._ssh_url)
return self._ssh_url.value
@property
def stargazers_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._stargazers_count) # pragma no cover (Should be covered)
return self._stargazers_count.value # pragma no cover (Should be covered)
@property
def stargazers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._stargazers_url)
return self._stargazers_url.value
@property
def statuses_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._statuses_url)
return self._statuses_url.value
@property
def subscribers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscribers_url)
return self._subscribers_url.value
@property
def subscription_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscription_url)
return self._subscription_url.value
@property
def svn_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._svn_url)
return self._svn_url.value
@property
def tags_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._tags_url)
return self._tags_url.value
@property
def teams_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._teams_url)
return self._teams_url.value
@property
def trees_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._trees_url)
return self._trees_url.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def watchers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._watchers)
return self._watchers.value
@property
def watchers_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._watchers_count)
return self._watchers_count.value
def add_to_collaborators(self, collaborator):
"""
:calls: `PUT /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/collaborators/" + collaborator
)
def compare(self, base, head):
"""
:calls: `GET /repos/:owner/:repo/compare/:base...:head <http://developer.github.com/v3/repos/commits>`_
:param base: string
:param head: string
:rtype: :class:`github.Comparison.Comparison`
"""
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/compare/" + base + "..." + head
)
return github.Comparison.Comparison(self._requester, headers, data, completed=True)
def create_git_blob(self, content, encoding):
"""
:calls: `POST /repos/:owner/:repo/git/blobs <http://developer.github.com/v3/git/blobs>`_
:param content: string
:param encoding: string
:rtype: :class:`github.GitBlob.GitBlob`
"""
assert isinstance(content, (str, unicode)), content
assert isinstance(encoding, (str, unicode)), encoding
post_parameters = {
"content": content,
"encoding": encoding,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/blobs",
input=post_parameters
)
return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def create_git_commit(self, message, tree, parents, author=github.GithubObject.NotSet, committer=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/commits <http://developer.github.com/v3/git/commits>`_
:param message: string
:param tree: :class:`github.GitTree.GitTree`
:param parents: list of :class:`github.GitCommit.GitCommit`
:param author: :class:`github.InputGitAuthor.InputGitAuthor`
:param committer: :class:`github.InputGitAuthor.InputGitAuthor`
:rtype: :class:`github.GitCommit.GitCommit`
"""
assert isinstance(message, (str, unicode)), message
assert isinstance(tree, github.GitTree.GitTree), tree
assert all(isinstance(element, github.GitCommit.GitCommit) for element in parents), parents
assert author is github.GithubObject.NotSet or isinstance(author, github.InputGitAuthor), author
assert committer is github.GithubObject.NotSet or isinstance(committer, github.InputGitAuthor), committer
post_parameters = {
"message": message,
"tree": tree._identity,
"parents": [element._identity for element in parents],
}
if author is not github.GithubObject.NotSet:
post_parameters["author"] = author._identity
if committer is not github.GithubObject.NotSet:
post_parameters["committer"] = committer._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/commits",
input=post_parameters
)
return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
def create_git_ref(self, ref, sha):
"""
:calls: `POST /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
:param ref: string
:param sha: string
:rtype: :class:`github.GitRef.GitRef`
"""
assert isinstance(ref, (str, unicode)), ref
assert isinstance(sha, (str, unicode)), sha
post_parameters = {
"ref": ref,
"sha": sha,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/refs",
input=post_parameters
)
return github.GitRef.GitRef(self._requester, headers, data, completed=True)
    def create_git_tag_and_release(self, tag, tag_message, release_name, release_message, object, type, tagger=github.GithubObject.NotSet, draft=False, prerelease=False):
        """
        Convenience wrapper: create an annotated git tag, then publish a
        release pointing at it.

        :param tag: string, used both as the git tag name and the release tag_name
        :param tag_message: string message stored on the git tag object
        :param release_name: string title of the release
        :param release_message: string body of the release
        :param object: string sha of the object to tag
        :param type: string type of the tagged object
        :param tagger: :class:`github.InputGitAuthor.InputGitAuthor`
        :param draft: bool
        :param prerelease: bool
        :rtype: :class:`github.GitRelease.GitRelease`
        """
        self.create_git_tag(tag, tag_message, object, type, tagger)
        return self.create_git_release(tag, release_name, release_message, draft, prerelease)
    def create_git_release(self, tag, name, message, draft=False, prerelease=False):
        """
        :calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_
        :param tag: string name of the tag the release points at
        :param name: string title of the release
        :param message: string body text of the release
        :param draft: bool, create the release as an unpublished draft
        :param prerelease: bool, mark the release as a pre-release
        :rtype: :class:`github.GitRelease.GitRelease`
        """
        assert isinstance(tag, (str, unicode)), tag
        assert isinstance(name, (str, unicode)), name
        assert isinstance(message, (str, unicode)), message
        assert isinstance(draft, bool), draft
        assert isinstance(prerelease, bool), prerelease
        post_parameters = {
            "tag_name": tag,
            "name": name,
            "body": message,
            "draft": draft,
            "prerelease": prerelease,
        }
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/releases",
            input=post_parameters
        )
        return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
def create_git_tag(self, tag, message, object, type, tagger=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/tags <http://developer.github.com/v3/git/tags>`_
:param tag: string
:param message: string
:param object: string
:param type: string
:param tagger: :class:`github.InputGitAuthor.InputGitAuthor`
:rtype: :class:`github.GitTag.GitTag`
"""
assert isinstance(tag, (str, unicode)), tag
assert isinstance(message, (str, unicode)), message
assert isinstance(object, (str, unicode)), object
assert isinstance(type, (str, unicode)), type
assert tagger is github.GithubObject.NotSet or isinstance(tagger, github.InputGitAuthor), tagger
post_parameters = {
"tag": tag,
"message": message,
"object": object,
"type": type,
}
if tagger is not github.GithubObject.NotSet:
post_parameters["tagger"] = tagger._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/tags",
input=post_parameters
)
return github.GitTag.GitTag(self._requester, headers, data, completed=True)
def create_git_tree(self, tree, base_tree=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/git/trees <http://developer.github.com/v3/git/trees>`_
:param tree: list of :class:`github.InputGitTreeElement.InputGitTreeElement`
:param base_tree: :class:`github.GitTree.GitTree`
:rtype: :class:`github.GitTree.GitTree`
"""
assert all(isinstance(element, github.InputGitTreeElement) for element in tree), tree
assert base_tree is github.GithubObject.NotSet or isinstance(base_tree, github.GitTree.GitTree), base_tree
post_parameters = {
"tree": [element._identity for element in tree],
}
if base_tree is not github.GithubObject.NotSet:
post_parameters["base_tree"] = base_tree._identity
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/git/trees",
input=post_parameters
)
return github.GitTree.GitTree(self._requester, headers, data, completed=True)
def create_hook(self, name, config, events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/hooks <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param active: bool
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/hooks",
input=post_parameters
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def create_issue(self, title, body=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, milestone=github.GithubObject.NotSet, labels=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
:param title: string
:param body: string
:param assignee: string or :class:`github.NamedUser.NamedUser`
:param milestone: :class:`github.Milestone.Milestone`
:param labels: list of :class:`github.Label.Label`
:rtype: :class:`github.Issue.Issue`
"""
assert isinstance(title, (str, unicode)), title
assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
assert milestone is github.GithubObject.NotSet or isinstance(milestone, github.Milestone.Milestone), milestone
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) or isinstance(element, str) for element in labels), labels
post_parameters = {
"title": title,
}
if body is not github.GithubObject.NotSet:
post_parameters["body"] = body
if assignee is not github.GithubObject.NotSet:
if isinstance(assignee, (str, unicode)):
post_parameters["assignee"] = assignee
else:
post_parameters["assignee"] = assignee._identity
if milestone is not github.GithubObject.NotSet:
post_parameters["milestone"] = milestone._identity
if labels is not github.GithubObject.NotSet:
post_parameters["labels"] = [element.name if isinstance(element, github.Label.Label) else element for element in labels]
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/issues",
input=post_parameters
)
return github.Issue.Issue(self._requester, headers, data, completed=True)
def create_key(self, title, key):
"""
:calls: `POST /repos/:owner/:repo/keys <http://developer.github.com/v3/repos/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.RepositoryKey.RepositoryKey`
"""
assert isinstance(title, (str, unicode)), title
assert isinstance(key, (str, unicode)), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/keys",
input=post_parameters
)
return github.RepositoryKey.RepositoryKey(self._requester, headers, data, completed=True, repoUrl=self.url)
def create_label(self, name, color):
"""
:calls: `POST /repos/:owner/:repo/labels <http://developer.github.com/v3/issues/labels>`_
:param name: string
:param color: string
:rtype: :class:`github.Label.Label`
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(color, (str, unicode)), color
post_parameters = {
"name": name,
"color": color,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/labels",
input=post_parameters
)
return github.Label.Label(self._requester, headers, data, completed=True)
def create_milestone(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_
:param title: string
:param state: string
:param description: string
:param due_on: date
:rtype: :class:`github.Milestone.Milestone`
"""
assert isinstance(title, (str, unicode)), title
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert due_on is github.GithubObject.NotSet or isinstance(due_on, datetime.date), due_on
post_parameters = {
"title": title,
}
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if due_on is not github.GithubObject.NotSet:
post_parameters["due_on"] = due_on.strftime("%Y-%m-%d")
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/milestones",
input=post_parameters
)
return github.Milestone.Milestone(self._requester, headers, data, completed=True)
def create_pull(self, *args, **kwds):
"""
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param issue: :class:`github.Issue.Issue`
:param base: string
:param head: string
:rtype: :class:`github.PullRequest.PullRequest`
"""
if len(args) + len(kwds) == 4:
return self.__create_pull_1(*args, **kwds)
else:
return self.__create_pull_2(*args, **kwds)
def __create_pull_1(self, title, body, base, head):
assert isinstance(title, (str, unicode)), title
assert isinstance(body, (str, unicode)), body
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
return self.__create_pull(title=title, body=body, base=base, head=head)
def __create_pull_2(self, issue, base, head):
assert isinstance(issue, github.Issue.Issue), issue
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
return self.__create_pull(issue=issue._identity, base=base, head=head)
def __create_pull(self, **kwds):
post_parameters = kwds
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/pulls",
input=post_parameters
)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, default_branch=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param default_branch: string
:rtype: None
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert default_branch is github.GithubObject.NotSet or isinstance(default_branch, (str, unicode)), default_branch
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if default_branch is not github.GithubObject.NotSet:
post_parameters["default_branch"] = default_branch
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_archive_link(self, archive_format, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/:archive_format/:ref <http://developer.github.com/v3/repos/contents>`_
:param archive_format: string
:param ref: string
:rtype: string
"""
assert isinstance(archive_format, (str, unicode)), archive_format
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url = self.url + "/" + archive_format
if ref is not github.GithubObject.NotSet:
url += "/" + ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
url
)
return headers["location"]
def get_assignees(self):
"""
:calls: `GET /repos/:owner/:repo/assignees <http://developer.github.com/v3/issues/assignees>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/assignees",
None
)
def get_branch(self, branch):
"""
:calls: `GET /repos/:owner/:repo/branches/:branch <http://developer.github.com/v3/repos>`_
:param branch: string
:rtype: :class:`github.Branch.Branch`
"""
assert isinstance(branch, (str, unicode)), branch
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/branches/" + branch
)
return github.Branch.Branch(self._requester, headers, data, completed=True)
def get_branches(self):
"""
:calls: `GET /repos/:owner/:repo/branches <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Branch.Branch`
"""
return github.PaginatedList.PaginatedList(
github.Branch.Branch,
self._requester,
self.url + "/branches",
None
)
def get_collaborators(self):
"""
:calls: `GET /repos/:owner/:repo/collaborators <http://developer.github.com/v3/repos/collaborators>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/collaborators",
None
)
def get_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/comments/:id <http://developer.github.com/v3/repos/comments>`_
:param id: integer
:rtype: :class:`github.CommitComment.CommitComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/comments/" + str(id)
)
return github.CommitComment.CommitComment(self._requester, headers, data, completed=True)
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
"""
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
)
def get_commit(self, sha):
"""
:calls: `GET /repos/:owner/:repo/commits/:sha <http://developer.github.com/v3/repos/commits>`_
:param sha: string
:rtype: :class:`github.Commit.Commit`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/commits/" + sha
)
return github.Commit.Commit(self._requester, headers, data, completed=True)
def get_commits(self, sha=github.GithubObject.NotSet, path=github.GithubObject.NotSet, since=github.GithubObject.NotSet, until=github.GithubObject.NotSet, author=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/commits <http://developer.github.com/v3/repos/commits>`_
:param sha: string
:param path: string
:param since: datetime.datetime
:param until: datetime.datetime
:param author: string or :class:`github.NamedUser.NamedUser` or :class:`github.AuthenticatedUser.AuthenticatedUser`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
"""
assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha
assert path is github.GithubObject.NotSet or isinstance(path, (str, unicode)), path
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert until is github.GithubObject.NotSet or isinstance(until, datetime.datetime), until
assert author is github.GithubObject.NotSet or isinstance(author, (str, unicode, github.NamedUser.NamedUser, github.AuthenticatedUser.AuthenticatedUser)), author
url_parameters = dict()
if sha is not github.GithubObject.NotSet:
url_parameters["sha"] = sha
if path is not github.GithubObject.NotSet:
url_parameters["path"] = path
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if until is not github.GithubObject.NotSet:
url_parameters["until"] = until.strftime("%Y-%m-%dT%H:%M:%SZ")
if author is not github.GithubObject.NotSet:
if isinstance(author, (github.NamedUser.NamedUser, github.AuthenticatedUser.AuthenticatedUser)):
url_parameters["author"] = author.login
else:
url_parameters["author"] = author
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self._requester,
self.url + "/commits",
url_parameters
)
def get_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
return self.get_file_contents(path, ref)
def get_file_contents(self, path, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
:param path: string
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
assert isinstance(path, (str, unicode)), path
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/contents" + path,
parameters=url_parameters
)
return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
    def get_dir_contents(self, path, ref=github.GithubObject.NotSet):
        """
        :calls: `GET /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents>`_
        :param path: string
        :param ref: string
        :rtype: list of :class:`github.ContentFile.ContentFile`
        """
        assert isinstance(path, (str, unicode)), path
        assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
        url_parameters = dict()
        if ref is not github.GithubObject.NotSet:
            url_parameters["ref"] = ref
        # NOTE(review): URL is "/contents" + path with no "/" separator, so path
        # presumably must begin with "/" — confirm against callers.
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.url + "/contents" + path,
            parameters=url_parameters
        )
        # Handle 302 redirect response
        # The requester evidently surfaces the redirect as response headers
        # rather than following it; re-issue the GET at the Location target.
        if headers.get('status') == '302 Found' and headers.get('location'):
            headers, data = self._requester.requestJsonAndCheck(
                "GET",
                headers['location'],
                parameters=url_parameters
            )
        return [
            github.ContentFile.ContentFile(self._requester, headers, attributes, completed=(attributes["type"] != "file"))  # Lazy completion only makes sense for files. See discussion here: https://github.com/jacquev6/PyGithub/issues/140#issuecomment-13481130
            for attributes in data
        ]
def get_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/contributors <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/contributors",
None
)
def get_download(self, id):
"""
:calls: `GET /repos/:owner/:repo/downloads/:id <http://developer.github.com/v3/repos/downloads>`_
:param id: integer
:rtype: :class:`github.Download.Download`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/downloads/" + str(id)
)
return github.Download.Download(self._requester, headers, data, completed=True)
def get_downloads(self):
"""
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
"""
return github.PaginatedList.PaginatedList(
github.Download.Download,
self._requester,
self.url + "/downloads",
None
)
def get_events(self):
"""
:calls: `GET /repos/:owner/:repo/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_forks(self):
"""
:calls: `GET /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
Repository,
self._requester,
self.url + "/forks",
None
)
def get_git_blob(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/blobs/:sha <http://developer.github.com/v3/git/blobs>`_
:param sha: string
:rtype: :class:`github.GitBlob.GitBlob`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/blobs/" + sha
)
return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def get_git_commit(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/commits/:sha <http://developer.github.com/v3/git/commits>`_
:param sha: string
:rtype: :class:`github.GitCommit.GitCommit`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/commits/" + sha
)
return github.GitCommit.GitCommit(self._requester, headers, data, completed=True)
    def get_git_ref(self, ref):
        """
        :calls: `GET /repos/:owner/:repo/git/refs/:ref <http://developer.github.com/v3/git/refs>`_
        :param ref: string
        :rtype: :class:`github.GitRef.GitRef`
        """
        # The requester carries a FIX_REPO_GET_GIT_REF switch that selects the
        # URL prefix; when it is off, "/git/" is used instead of "/git/refs/".
        # NOTE(review): presumably a workaround for a historical API/client
        # quirk — confirm against the Requester implementation before changing.
        prefix = "/git/refs/"
        if not self._requester.FIX_REPO_GET_GIT_REF:
            prefix = "/git/"
        assert isinstance(ref, (str, unicode)), ref
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.url + prefix + ref
        )
        return github.GitRef.GitRef(self._requester, headers, data, completed=True)
def get_git_refs(self):
"""
:calls: `GET /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GitRef.GitRef`
"""
return github.PaginatedList.PaginatedList(
github.GitRef.GitRef,
self._requester,
self.url + "/git/refs",
None
)
def get_git_tag(self, sha):
"""
:calls: `GET /repos/:owner/:repo/git/tags/:sha <http://developer.github.com/v3/git/tags>`_
:param sha: string
:rtype: :class:`github.GitTag.GitTag`
"""
assert isinstance(sha, (str, unicode)), sha
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/tags/" + sha
)
return github.GitTag.GitTag(self._requester, headers, data, completed=True)
def get_git_tree(self, sha, recursive=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/git/trees/:sha <http://developer.github.com/v3/git/trees>`_
:param sha: string
:param recursive: bool
:rtype: :class:`github.GitTree.GitTree`
"""
assert isinstance(sha, (str, unicode)), sha
assert recursive is github.GithubObject.NotSet or isinstance(recursive, bool), recursive
url_parameters = dict()
if recursive is not github.GithubObject.NotSet:
url_parameters["recursive"] = recursive
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/git/trees/" + sha,
parameters=url_parameters
)
return github.GitTree.GitTree(self._requester, headers, data, completed=True)
def get_hook(self, id):
"""
:calls: `GET /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param id: integer
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/hooks/" + str(id)
)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def get_hooks(self):
"""
:calls: `GET /repos/:owner/:repo/hooks <http://developer.github.com/v3/repos/hooks>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Hook.Hook`
"""
return github.PaginatedList.PaginatedList(
github.Hook.Hook,
self._requester,
self.url + "/hooks",
None
)
def get_issue(self, number):
"""
:calls: `GET /repos/:owner/:repo/issues/:number <http://developer.github.com/v3/issues>`_
:param number: integer
:rtype: :class:`github.Issue.Issue`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/issues/" + str(number)
)
return github.Issue.Issue(self._requester, headers, data, completed=True)
def get_issues(self, milestone=github.GithubObject.NotSet, state=github.GithubObject.NotSet, assignee=github.GithubObject.NotSet, mentioned=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet, creator=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/issues <http://developer.github.com/v3/issues>`_
:param milestone: :class:`github.Milestone.Milestone` or "none" or "*"
:param state: string
:param assignee: string or :class:`github.NamedUser.NamedUser` or "none" or "*"
:param mentioned: :class:`github.NamedUser.NamedUser`
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:param creator: string or :class:`github.NamedUser.NamedUser`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert milestone is github.GithubObject.NotSet or milestone == "*" or milestone == "none" or isinstance(milestone, github.Milestone.Milestone), milestone
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert assignee is github.GithubObject.NotSet or isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
assert mentioned is github.GithubObject.NotSet or isinstance(mentioned, github.NamedUser.NamedUser), mentioned
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert creator is github.GithubObject.NotSet or isinstance(creator, github.NamedUser.NamedUser) or isinstance(creator, (str, unicode)), creator
url_parameters = dict()
if milestone is not github.GithubObject.NotSet:
if isinstance(milestone, str):
url_parameters["milestone"] = milestone
else:
url_parameters["milestone"] = milestone._identity
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if assignee is not github.GithubObject.NotSet:
if isinstance(assignee, str):
url_parameters["assignee"] = assignee
else:
url_parameters["assignee"] = assignee._identity
if mentioned is not github.GithubObject.NotSet:
url_parameters["mentioned"] = mentioned._identity
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if creator is not github.GithubObject.NotSet:
if isinstance(creator, str):
url_parameters["creator"] = creator
else:
url_parameters["creator"] = creator._identity
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
self.url + "/issues",
url_parameters
)
def get_issues_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/issues/comments <http://developer.github.com/v3/issues/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/issues/comments",
url_parameters
)
def get_issues_event(self, id):
"""
:calls: `GET /repos/:owner/:repo/issues/events/:id <http://developer.github.com/v3/issues/events>`_
:param id: integer
:rtype: :class:`github.IssueEvent.IssueEvent`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/issues/events/" + str(id)
)
return github.IssueEvent.IssueEvent(self._requester, headers, data, completed=True)
def get_issues_events(self):
"""
:calls: `GET /repos/:owner/:repo/issues/events <http://developer.github.com/v3/issues/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
"""
return github.PaginatedList.PaginatedList(
github.IssueEvent.IssueEvent,
self._requester,
self.url + "/issues/events",
None
)
def get_key(self, id):
"""
:calls: `GET /repos/:owner/:repo/keys/:id <http://developer.github.com/v3/repos/keys>`_
:param id: integer
:rtype: :class:`github.RepositoryKey.RepositoryKey`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/keys/" + str(id)
)
return github.RepositoryKey.RepositoryKey(self._requester, headers, data, completed=True, repoUrl=self.url)
def get_keys(self):
"""
:calls: `GET /repos/:owner/:repo/keys <http://developer.github.com/v3/repos/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.RepositoryKey.RepositoryKey`
"""
return github.PaginatedList.PaginatedList(
lambda requester, headers, data, completed: github.RepositoryKey.RepositoryKey(requester, headers, data, completed, repoUrl=self.url),
self._requester,
self.url + "/keys",
None
)
def get_label(self, name):
"""
:calls: `GET /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_
:param name: string
:rtype: :class:`github.Label.Label`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/labels/" + urllib.quote(name)
)
return github.Label.Label(self._requester, headers, data, completed=True)
def get_labels(self):
"""
:calls: `GET /repos/:owner/:repo/labels <http://developer.github.com/v3/issues/labels>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
"""
return github.PaginatedList.PaginatedList(
github.Label.Label,
self._requester,
self.url + "/labels",
None
)
def get_languages(self):
"""
:calls: `GET /repos/:owner/:repo/languages <http://developer.github.com/v3/repos>`_
:rtype: dict of string to integer
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/languages"
)
return data
def get_milestone(self, number):
"""
:calls: `GET /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_
:param number: integer
:rtype: :class:`github.Milestone.Milestone`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/milestones/" + str(number)
)
return github.Milestone.Milestone(self._requester, headers, data, completed=True)
def get_milestones(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_
:param state: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Milestone.Milestone`
"""
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Milestone.Milestone,
self._requester,
self.url + "/milestones",
url_parameters
)
def get_network_events(self):
"""
:calls: `GET /networks/:owner/:repo/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/networks/" + self.owner.login + "/" + self.name + "/events",
None
)
def get_pull(self, number):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_
:param number: integer
:rtype: :class:`github.PullRequest.PullRequest`
"""
assert isinstance(number, (int, long)), number
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/pulls/" + str(number)
)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
def get_pulls(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, base=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param state: string
:param sort: string
:param direction: string
:param base: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequest.PullRequest`
"""
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert base is github.GithubObject.NotSet or isinstance(base, (str, unicode)), base
url_parameters = dict()
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if base is not github.GithubObject.NotSet:
url_parameters["base"] = base
return github.PaginatedList.PaginatedList(
github.PullRequest.PullRequest,
self._requester,
self.url + "/pulls",
url_parameters
)
def get_pulls_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
return self.get_pulls_review_comments(sort, direction, since)
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/pulls/comments",
url_parameters
)
def get_readme(self, ref=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/readme <http://developer.github.com/v3/repos/contents>`_
:param ref: string
:rtype: :class:`github.ContentFile.ContentFile`
"""
assert ref is github.GithubObject.NotSet or isinstance(ref, (str, unicode)), ref
url_parameters = dict()
if ref is not github.GithubObject.NotSet:
url_parameters["ref"] = ref
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/readme",
parameters=url_parameters
)
return github.ContentFile.ContentFile(self._requester, headers, data, completed=True)
def get_stargazers(self):
"""
:calls: `GET /repos/:owner/:repo/stargazers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/stargazers",
None
)
def get_stargazers_with_dates(self):
"""
:calls: `GET /repos/:owner/:repo/stargazers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Stargazer.Stargazer`
"""
return github.PaginatedList.PaginatedList(
github.Stargazer.Stargazer,
self._requester,
self.url + "/stargazers",
None,
headers={'Accept': 'application/vnd.github.v3.star+json'}
)
def get_stats_contributors(self):
"""
:calls: `GET /repos/:owner/:repo/stats/contributors <http://developer.github.com/v3/repos/statistics/#get-contributors-list-with-additions-deletions-and-commit-counts>`_
:rtype: None or list of :class:`github.StatsContributor.StatsContributor`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/contributors"
)
if data == {}:
return None
else:
return [
github.StatsContributor.StatsContributor(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_commit_activity(self):
"""
:calls: `GET /repos/:owner/:repo/stats/commit_activity <developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day>`_
:rtype: None or list of :class:`github.StatsCommitActivity.StatsCommitActivity`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/commit_activity"
)
if data == {}:
return None
else:
return [
github.StatsCommitActivity.StatsCommitActivity(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_code_frequency(self):
"""
:calls: `GET /repos/:owner/:repo/stats/code_frequency <http://developer.github.com/v3/repos/statistics/#get-the-number-of-additions-and-deletions-per-week>`_
:rtype: None or list of :class:`github.StatsCodeFrequency.StatsCodeFrequency`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/code_frequency"
)
if data == {}:
return None
else:
return [
github.StatsCodeFrequency.StatsCodeFrequency(self._requester, headers, attributes, completed=True)
for attributes in data
]
def get_stats_participation(self):
"""
:calls: `GET /repos/:owner/:repo/stats/participation <http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else>`_
:rtype: None or :class:`github.StatsParticipation.StatsParticipation`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/participation"
)
if data == {}:
return None
else:
return github.StatsParticipation.StatsParticipation(self._requester, headers, data, completed=True)
def get_stats_punch_card(self):
"""
:calls: `GET /repos/:owner/:repo/stats/punch_card <http://developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day>`_
:rtype: None or :class:`github.StatsPunchCard.StatsPunchCard`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/stats/punch_card"
)
if data == {}:
return None
else:
return github.StatsPunchCard.StatsPunchCard(self._requester, headers, data, completed=True)
def get_subscribers(self):
"""
:calls: `GET /repos/:owner/:repo/subscribers <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/subscribers",
None
)
def get_tags(self):
"""
:calls: `GET /repos/:owner/:repo/tags <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Tag.Tag`
"""
return github.PaginatedList.PaginatedList(
github.Tag.Tag,
self._requester,
self.url + "/tags",
None
)
    def get_releases(self):
        """
        :calls: `GET /repos/:owner/:repo/releases <http://developer.github.com/v3/repos>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GitRelease.GitRelease`
        """
        return github.PaginatedList.PaginatedList(
            github.GitRelease.GitRelease,
            self._requester,
            self.url + "/releases",
            None
        )
def get_release(self, id):
"""
:calls: `GET /repos/:owner/:repo/releases/:id https://developer.github.com/v3/repos/releases/#get-a-single-release
:param id: int (release id), str (tag name)
:rtype: None or :class:`github.GitRelease.GitRelease`
"""
if isinstance(id, int):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/releases/" + str(id)
)
return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
elif isinstance(id, str):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/releases/tags/" + id
)
return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
def get_teams(self):
"""
:calls: `GET /repos/:owner/:repo/teams <http://developer.github.com/v3/repos>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.url + "/teams",
None
)
def get_watchers(self):
"""
:calls: `GET /repos/:owner/:repo/watchers <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/watchers",
None
)
def has_in_assignees(self, assignee):
"""
:calls: `GET /repos/:owner/:repo/assignees/:assignee <http://developer.github.com/v3/issues/assignees>`_
:param assignee: string or :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(assignee, github.NamedUser.NamedUser) or isinstance(assignee, (str, unicode)), assignee
if isinstance(assignee, github.NamedUser.NamedUser):
assignee = assignee._identity
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/assignees/" + assignee
)
return status == 204
def has_in_collaborators(self, collaborator):
"""
:calls: `GET /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/collaborators/" + collaborator
)
return status == 204
def legacy_search_issues(self, state, keyword):
"""
:calls: `GET /legacy/issues/search/:owner/:repository/:state/:keyword <http://developer.github.com/v3/search/legacy>`_
:param state: "open" or "closed"
:param keyword: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert state in ["open", "closed"], state
assert isinstance(keyword, (str, unicode)), keyword
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/legacy/issues/search/" + self.owner.login + "/" + self.name + "/" + state + "/" + urllib.quote(keyword)
)
return [
github.Issue.Issue(self._requester, headers, github.Legacy.convertIssue(element), completed=False)
for element in data["issues"]
]
def merge(self, base, head, commit_message=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/merges <http://developer.github.com/v3/repos/merging>`_
:param base: string
:param head: string
:param commit_message: string
:rtype: :class:`github.Commit.Commit`
"""
assert isinstance(base, (str, unicode)), base
assert isinstance(head, (str, unicode)), head
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
post_parameters = {
"base": base,
"head": head,
}
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/merges",
input=post_parameters
)
if data is None:
return None
else:
return github.Commit.Commit(self._requester, headers, data, completed=True)
def remove_from_collaborators(self, collaborator):
"""
:calls: `DELETE /repos/:owner/:repo/collaborators/:user <http://developer.github.com/v3/repos/collaborators>`_
:param collaborator: string or :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser) or isinstance(collaborator, (str, unicode)), collaborator
if isinstance(collaborator, github.NamedUser.NamedUser):
collaborator = collaborator._identity
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/collaborators/" + collaborator
)
def subscribe_to_hub(self, event, callback, secret=github.GithubObject.NotSet):
"""
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:param secret: string
:rtype: None
"""
return self._hub("subscribe", event, callback, secret)
    def unsubscribe_from_hub(self, event, callback):
        """
        :calls: `POST /hub <http://developer.github.com/>`_
        :param event: string
        :param callback: string
        :rtype: None
        """
        # No secret applies when unsubscribing, so NotSet is passed through.
        return self._hub("unsubscribe", event, callback, github.GithubObject.NotSet)
def _hub(self, mode, event, callback, secret):
assert isinstance(mode, (str, unicode)), mode
assert isinstance(event, (str, unicode)), event
assert isinstance(callback, (str, unicode)), callback
assert secret is github.GithubObject.NotSet or isinstance(secret, (str, unicode)), secret
post_parameters = {
"hub.mode": mode,
"hub.topic": "https://github.com/" + self.full_name + "/events/" + event,
"hub.callback": callback,
}
if secret is not github.GithubObject.NotSet:
post_parameters["hub.secret"] = secret
headers, output = self._requester.requestMultipartAndCheck(
"POST",
"/hub",
input=post_parameters
)
@property
def _identity(self):
return self.owner.login + "/" + self.name
def _initAttributes(self):
self._archive_url = github.GithubObject.NotSet
self._assignees_url = github.GithubObject.NotSet
self._blobs_url = github.GithubObject.NotSet
self._branches_url = github.GithubObject.NotSet
self._clone_url = github.GithubObject.NotSet
self._collaborators_url = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._compare_url = github.GithubObject.NotSet
self._contents_url = github.GithubObject.NotSet
self._contributors_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._default_branch = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._downloads_url = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._fork = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_count = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._full_name = github.GithubObject.NotSet
self._git_commits_url = github.GithubObject.NotSet
self._git_refs_url = github.GithubObject.NotSet
self._git_tags_url = github.GithubObject.NotSet
self._git_url = github.GithubObject.NotSet
self._has_downloads = github.GithubObject.NotSet
self._has_issues = github.GithubObject.NotSet
self._has_wiki = github.GithubObject.NotSet
self._homepage = github.GithubObject.NotSet
self._hooks_url = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue_comment_url = github.GithubObject.NotSet
self._issue_events_url = github.GithubObject.NotSet
self._issues_url = github.GithubObject.NotSet
self._keys_url = github.GithubObject.NotSet
self._labels_url = github.GithubObject.NotSet
self._language = github.GithubObject.NotSet
self._languages_url = github.GithubObject.NotSet
self._master_branch = github.GithubObject.NotSet
self._merges_url = github.GithubObject.NotSet
self._milestones_url = github.GithubObject.NotSet
self._mirror_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._network_count = github.GithubObject.NotSet
self._notifications_url = github.GithubObject.NotSet
self._open_issues = github.GithubObject.NotSet
self._open_issues_count = github.GithubObject.NotSet
self._organization = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._parent = github.GithubObject.NotSet
self._permissions = github.GithubObject.NotSet
self._private = github.GithubObject.NotSet
self._pulls_url = github.GithubObject.NotSet
self._pushed_at = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._source = github.GithubObject.NotSet
self._ssh_url = github.GithubObject.NotSet
self._stargazers_count = github.GithubObject.NotSet
self._stargazers_url = github.GithubObject.NotSet
self._statuses_url = github.GithubObject.NotSet
self._subscribers_url = github.GithubObject.NotSet
self._subscription_url = github.GithubObject.NotSet
self._svn_url = github.GithubObject.NotSet
self._tags_url = github.GithubObject.NotSet
self._teams_url = github.GithubObject.NotSet
self._trees_url = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._watchers = github.GithubObject.NotSet
self._watchers_count = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "archive_url" in attributes: # pragma no branch
self._archive_url = self._makeStringAttribute(attributes["archive_url"])
if "assignees_url" in attributes: # pragma no branch
self._assignees_url = self._makeStringAttribute(attributes["assignees_url"])
if "blobs_url" in attributes: # pragma no branch
self._blobs_url = self._makeStringAttribute(attributes["blobs_url"])
if "branches_url" in attributes: # pragma no branch
self._branches_url = self._makeStringAttribute(attributes["branches_url"])
if "clone_url" in attributes: # pragma no branch
self._clone_url = self._makeStringAttribute(attributes["clone_url"])
if "collaborators_url" in attributes: # pragma no branch
self._collaborators_url = self._makeStringAttribute(attributes["collaborators_url"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "compare_url" in attributes: # pragma no branch
self._compare_url = self._makeStringAttribute(attributes["compare_url"])
if "contents_url" in attributes: # pragma no branch
self._contents_url = self._makeStringAttribute(attributes["contents_url"])
if "contributors_url" in attributes: # pragma no branch
self._contributors_url = self._makeStringAttribute(attributes["contributors_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "default_branch" in attributes: # pragma no branch
self._default_branch = self._makeStringAttribute(attributes["default_branch"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "downloads_url" in attributes: # pragma no branch
self._downloads_url = self._makeStringAttribute(attributes["downloads_url"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "fork" in attributes: # pragma no branch
self._fork = self._makeBoolAttribute(attributes["fork"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeIntAttribute(attributes["forks"])
if "forks_count" in attributes: # pragma no branch
self._forks_count = self._makeIntAttribute(attributes["forks_count"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "full_name" in attributes: # pragma no branch
self._full_name = self._makeStringAttribute(attributes["full_name"])
if "git_commits_url" in attributes: # pragma no branch
self._git_commits_url = self._makeStringAttribute(attributes["git_commits_url"])
if "git_refs_url" in attributes: # pragma no branch
self._git_refs_url = self._makeStringAttribute(attributes["git_refs_url"])
if "git_tags_url" in attributes: # pragma no branch
self._git_tags_url = self._makeStringAttribute(attributes["git_tags_url"])
if "git_url" in attributes: # pragma no branch
self._git_url = self._makeStringAttribute(attributes["git_url"])
if "has_downloads" in attributes: # pragma no branch
self._has_downloads = self._makeBoolAttribute(attributes["has_downloads"])
if "has_issues" in attributes: # pragma no branch
self._has_issues = self._makeBoolAttribute(attributes["has_issues"])
if "has_wiki" in attributes: # pragma no branch
self._has_wiki = self._makeBoolAttribute(attributes["has_wiki"])
if "homepage" in attributes: # pragma no branch
self._homepage = self._makeStringAttribute(attributes["homepage"])
if "hooks_url" in attributes: # pragma no branch
self._hooks_url = self._makeStringAttribute(attributes["hooks_url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue_comment_url" in attributes: # pragma no branch
self._issue_comment_url = self._makeStringAttribute(attributes["issue_comment_url"])
if "issue_events_url" in attributes: # pragma no branch
self._issue_events_url = self._makeStringAttribute(attributes["issue_events_url"])
if "issues_url" in attributes: # pragma no branch
self._issues_url = self._makeStringAttribute(attributes["issues_url"])
if "keys_url" in attributes: # pragma no branch
self._keys_url = self._makeStringAttribute(attributes["keys_url"])
if "labels_url" in attributes: # pragma no branch
self._labels_url = self._makeStringAttribute(attributes["labels_url"])
if "language" in attributes: # pragma no branch
self._language = self._makeStringAttribute(attributes["language"])
if "languages_url" in attributes: # pragma no branch
self._languages_url = self._makeStringAttribute(attributes["languages_url"])
if "master_branch" in attributes: # pragma no branch
self._master_branch = self._makeStringAttribute(attributes["master_branch"])
if "merges_url" in attributes: # pragma no branch
self._merges_url = self._makeStringAttribute(attributes["merges_url"])
if "milestones_url" in attributes: # pragma no branch
self._milestones_url = self._makeStringAttribute(attributes["milestones_url"])
if "mirror_url" in attributes: # pragma no branch
self._mirror_url = self._makeStringAttribute(attributes["mirror_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "network_count" in attributes: # pragma no branch
self._network_count = self._makeIntAttribute(attributes["network_count"])
if "notifications_url" in attributes: # pragma no branch
self._notifications_url = self._makeStringAttribute(attributes["notifications_url"])
if "open_issues" in attributes: # pragma no branch
self._open_issues = self._makeIntAttribute(attributes["open_issues"])
if "open_issues_count" in attributes: # pragma no branch
self._open_issues_count = self._makeIntAttribute(attributes["open_issues_count"])
if "organization" in attributes: # pragma no branch
self._organization = self._makeClassAttribute(github.Organization.Organization, attributes["organization"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "parent" in attributes: # pragma no branch
self._parent = self._makeClassAttribute(Repository, attributes["parent"])
if "permissions" in attributes: # pragma no branch
self._permissions = self._makeClassAttribute(github.Permissions.Permissions, attributes["permissions"])
if "private" in attributes: # pragma no branch
self._private = self._makeBoolAttribute(attributes["private"])
if "pulls_url" in attributes: # pragma no branch
self._pulls_url = self._makeStringAttribute(attributes["pulls_url"])
if "pushed_at" in attributes: # pragma no branch
self._pushed_at = self._makeDatetimeAttribute(attributes["pushed_at"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "source" in attributes: # pragma no branch
self._source = self._makeClassAttribute(Repository, attributes["source"])
if "ssh_url" in attributes: # pragma no branch
self._ssh_url = self._makeStringAttribute(attributes["ssh_url"])
if "stargazers_count" in attributes: # pragma no branch
self._stargazers_count = self._makeIntAttribute(attributes["stargazers_count"])
if "stargazers_url" in attributes: # pragma no branch
self._stargazers_url = self._makeStringAttribute(attributes["stargazers_url"])
if "statuses_url" in attributes: # pragma no branch
self._statuses_url = self._makeStringAttribute(attributes["statuses_url"])
if "subscribers_url" in attributes: # pragma no branch
self._subscribers_url = self._makeStringAttribute(attributes["subscribers_url"])
if "subscription_url" in attributes: # pragma no branch
self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
if "svn_url" in attributes: # pragma no branch
self._svn_url = self._makeStringAttribute(attributes["svn_url"])
if "tags_url" in attributes: # pragma no branch
self._tags_url = self._makeStringAttribute(attributes["tags_url"])
if "teams_url" in attributes: # pragma no branch
self._teams_url = self._makeStringAttribute(attributes["teams_url"])
if "trees_url" in attributes: # pragma no branch
self._trees_url = self._makeStringAttribute(attributes["trees_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "watchers" in attributes: # pragma no branch
self._watchers = self._makeIntAttribute(attributes["watchers"])
if "watchers_count" in attributes: # pragma no branch
self._watchers_count = self._makeIntAttribute(attributes["watchers_count"])
| gpl-3.0 |
naresh21/synergetics-edx-platform | openedx/core/djangoapps/user_api/preferences/tests/test_views.py | 21 | 23538 | # -*- coding: utf-8 -*-
"""
Unit tests for preference APIs.
"""
import unittest
import ddt
import json
from mock import patch
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test.testcases import TransactionTestCase
from rest_framework.test import APIClient
from student.tests.factories import UserFactory
from ...accounts.tests.test_views import UserAPITestCase
from ..api import set_user_preference
from .test_api import get_expected_validation_developer_message, get_expected_key_error_user_message
# A 256-character key; the tests below expect key validation to reject it.
TOO_LONG_PREFERENCE_KEY = u"x" * 256
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestPreferencesAPI(UserAPITestCase):
    """
    Unit tests /api/user/v1/accounts/{username}/

    Covers authentication, authorization, and the GET/PATCH contract of the
    bulk preferences endpoint.
    """
    def setUp(self):
        super(TestPreferencesAPI, self).setUp()
        self.url_endpoint_name = "preferences_api"
        self.url = reverse(self.url_endpoint_name, kwargs={'username': self.user.username})

    def test_anonymous_access(self):
        """
        Test that an anonymous client (not logged in) cannot call GET or PATCH.
        """
        self.send_get(self.anonymous_client, expected_status=401)
        self.send_patch(self.anonymous_client, {}, expected_status=401)

    def test_unsupported_methods(self):
        """
        Test that DELETE, POST, and PUT are not supported.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.assertEqual(405, self.client.put(self.url).status_code)
        self.assertEqual(405, self.client.post(self.url).status_code)
        self.assertEqual(405, self.client.delete(self.url).status_code)

    def test_get_different_user(self):
        """
        Test that a client (logged in) cannot get the preferences information for a different client.
        """
        self.different_client.login(username=self.different_user.username, password=self.test_password)
        self.send_get(self.different_client, expected_status=404)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_unknown_user(self, api_client, username):
        """
        Test that requesting a user who does not exist returns a 404.
        """
        client = self.login_client(api_client, username)
        response = client.get(reverse(self.url_endpoint_name, kwargs={'username': "does_not_exist"}))
        self.assertEqual(404, response.status_code)

    def test_get_preferences_default(self):
        """
        Test that a client (logged in) can get her own preferences information (verifying the default
        state before any preferences are stored).
        """
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_get(self.client)
        self.assertEqual({}, response.data)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_preferences(self, api_client, user):
        """
        Test that a client (logged in) can get her own preferences information. Also verifies that a "is_staff"
        user can get the preferences information for other users.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "time_zone", "Asia/Tokyo")

        # Log in the client and do the GET.
        client = self.login_client(api_client, user)
        response = self.send_get(client)
        self.assertEqual({"dict_pref": "{'int_key': 10}", "string_pref": "value", "time_zone": "Asia/Tokyo"},
                         response.data)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_patch_unknown_user(self, api_client, user):
        """
        Test that trying to update preferences for a user who does not exist returns a 404.
        """
        client = self.login_client(api_client, user)
        response = client.patch(
            reverse(self.url_endpoint_name, kwargs={'username': "does_not_exist"}),
            data=json.dumps({"string_pref": "value"}), content_type="application/merge-patch+json"
        )
        self.assertEqual(404, response.status_code)

    def test_patch_bad_content_type(self):
        """
        Test the behavior of patch when an incorrect content_type is specified.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.send_patch(self.client, {}, content_type="application/json", expected_status=415)
        self.send_patch(self.client, {}, content_type="application/xml", expected_status=415)

    def test_create_preferences(self):
        """
        Test that a client (logged in) can create her own preferences information.
        """
        self._do_create_preferences_test(True)

    def test_create_preferences_inactive(self):
        """
        Test that a client (logged in but not active) can create her own preferences information.
        """
        self._do_create_preferences_test(False)

    def _do_create_preferences_test(self, is_active):
        """
        Internal helper to generalize the creation of a set of preferences
        """
        self.client.login(username=self.user.username, password=self.test_password)
        if not is_active:
            self.user.is_active = False
            self.user.save()
        self.send_patch(
            self.client,
            {
                "dict_pref": {"int_key": 10},
                "string_pref": "value",
            },
            expected_status=204
        )
        response = self.send_get(self.client)
        self.assertEqual({u"dict_pref": u"{u'int_key': 10}", u"string_pref": u"value"}, response.data)

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_create_preferences_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot create preferences for another user.
        """
        client = self.login_client(api_client, user)
        self.send_patch(
            client,
            {
                "dict_pref": {"int_key": 10},
                "string_pref": "value",
            },
            expected_status=403 if user == "staff_user" else 404,
        )

    def test_update_preferences(self):
        """
        Test that a client (logged in) can update her own preferences information.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "extra_pref", "extra_value")
        set_user_preference(self.user, "time_zone", "Asia/Macau")

        # Send the patch request
        self.client.login(username=self.user.username, password=self.test_password)
        self.send_patch(
            self.client,
            {
                "string_pref": "updated_value",
                "new_pref": "new_value",
                "extra_pref": None,
                "time_zone": "Europe/London",
            },
            expected_status=204
        )

        # Verify that GET returns the updated preferences
        response = self.send_get(self.client)
        expected_preferences = {
            "dict_pref": "{'int_key': 10}",
            "string_pref": "updated_value",
            "new_pref": "new_value",
            "time_zone": "Europe/London",
        }
        self.assertEqual(expected_preferences, response.data)

    def test_update_preferences_bad_data(self):
        """
        Test that a client (logged in) receives appropriate errors for a bad update.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "extra_pref", "extra_value")
        set_user_preference(self.user, "time_zone", "Pacific/Midway")

        # Send the patch request
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_patch(
            self.client,
            {
                "string_pref": "updated_value",
                TOO_LONG_PREFERENCE_KEY: "new_value",
                "new_pref": "new_value",
                u"empty_pref_ȻħȺɍłɇs": "",
                "time_zone": "Asia/Africa",
            },
            expected_status=400
        )
        self.assertTrue(response.data.get("field_errors", None))
        field_errors = response.data["field_errors"]
        # assertEqual (not the deprecated assertEquals alias), consistent with
        # the rest of this module.
        self.assertEqual(
            field_errors,
            {
                TOO_LONG_PREFERENCE_KEY: {
                    "developer_message": get_expected_validation_developer_message(
                        TOO_LONG_PREFERENCE_KEY, "new_value"
                    ),
                    "user_message": get_expected_key_error_user_message(
                        TOO_LONG_PREFERENCE_KEY, "new_value"
                    ),
                },
                u"empty_pref_ȻħȺɍłɇs": {
                    "developer_message": u"Preference 'empty_pref_ȻħȺɍłɇs' cannot be set to an empty value.",
                    "user_message": u"Preference 'empty_pref_ȻħȺɍłɇs' cannot be set to an empty value.",
                },
                "time_zone": {
                    "developer_message": u"Value 'Asia/Africa' not valid for preference 'time_zone': Not in "
                                         u"timezone set.",
                    "user_message": u"Value 'Asia/Africa' is not a valid time zone selection."
                },
            }
        )

        # Verify that GET returns the original preferences
        response = self.send_get(self.client)
        expected_preferences = {
            u"dict_pref": u"{'int_key': 10}",
            u"string_pref": u"value",
            u"extra_pref": u"extra_value",
            u"time_zone": u"Pacific/Midway",
        }
        self.assertEqual(expected_preferences, response.data)

    def test_update_preferences_bad_request(self):
        """
        Test that a client (logged in) receives appropriate errors for a bad request.
        """
        self.client.login(username=self.user.username, password=self.test_password)

        # Verify a non-dict request
        response = self.send_patch(self.client, "non_dict_request", expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"No data provided for user preference update",
                "user_message": u"No data provided for user preference update"
            }
        )

        # Verify an empty dict request
        response = self.send_patch(self.client, {}, expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"No data provided for user preference update",
                "user_message": u"No data provided for user preference update"
            }
        )

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_update_preferences_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot update preferences for another user.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "extra_pref", "extra_value")

        # Send the patch request
        client = self.login_client(api_client, user)
        self.send_patch(
            client,
            {
                "string_pref": "updated_value",
                "new_pref": "new_value",
                "extra_pref": None,
            },
            expected_status=403 if user == "staff_user" else 404
        )
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestPreferencesAPITransactions(TransactionTestCase):
    """
    Tests the transactional behavior of the preferences API
    """
    test_password = "test"

    def setUp(self):
        super(TestPreferencesAPITransactions, self).setUp()
        self.client = APIClient()
        self.user = UserFactory.create(password=self.test_password)
        self.url = reverse("preferences_api", kwargs={'username': self.user.username})

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    def test_update_preferences_rollback(self, delete_user_preference):
        """
        Verify that updating preferences is transactional when a failure happens.
        """
        # Seed three preferences so there is state to roll back to.
        for key, value in (("a", "1"), ("b", "2"), ("c", "3")):
            set_user_preference(self.user, key, value)

        # PATCH two updates plus a delete; the delete blows up part-way
        # through, so the whole request must leave no changes behind.
        delete_user_preference.side_effect = [Exception, None]
        self.client.login(username=self.user.username, password=self.test_password)
        update = {
            "a": "2",
            "b": None,
            "c": "1",
        }
        response = self.client.patch(self.url, data=json.dumps(update), content_type="application/merge-patch+json")
        self.assertEqual(400, response.status_code)

        # The stored preferences must be exactly as they were before the PATCH.
        response = self.client.get(self.url)
        expected_preferences = {
            "a": "1",
            "b": "2",
            "c": "3",
        }
        self.assertEqual(expected_preferences, response.data)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestPreferencesDetailAPI(UserAPITestCase):
    """
    Unit tests /api/user/v1/accounts/{username}/{preference_key}

    Covers authentication, authorization, and the GET/PUT/DELETE contract of
    the single-preference endpoint.
    """
    def setUp(self):
        super(TestPreferencesDetailAPI, self).setUp()
        self.test_pref_key = "test_key"
        self.test_pref_value = "test_value"
        set_user_preference(self.user, self.test_pref_key, self.test_pref_value)
        self.url_endpoint_name = "preferences_detail_api"
        self._set_url(self.test_pref_key)

    def _set_url(self, preference_key):
        """
        Sets the url attribute including the username and provided preference key
        """
        self.url = reverse(
            self.url_endpoint_name,
            kwargs={'username': self.user.username, 'preference_key': preference_key}
        )

    def test_anonymous_user_access(self):
        """
        Test that an anonymous client (logged in) cannot manipulate preferences.
        """
        self.send_get(self.anonymous_client, expected_status=401)
        self.send_put(self.anonymous_client, "new_value", expected_status=401)
        self.send_delete(self.anonymous_client, expected_status=401)

    def test_unsupported_methods(self):
        """
        Test that POST and PATCH are not supported.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.assertEqual(405, self.client.post(self.url).status_code)
        self.assertEqual(405, self.client.patch(self.url).status_code)

    def test_different_user_access(self):
        """
        Test that a client (logged in) cannot manipulate a preference for a different client.
        """
        self.different_client.login(username=self.different_user.username, password=self.test_password)
        self.send_get(self.different_client, expected_status=404)
        self.send_put(self.different_client, "new_value", expected_status=404)
        self.send_delete(self.different_client, expected_status=404)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_unknown_user(self, api_client, username):
        """
        Test that requesting a user who does not exist returns a 404.
        """
        client = self.login_client(api_client, username)
        response = client.get(
            reverse(self.url_endpoint_name, kwargs={'username': "does_not_exist", 'preference_key': self.test_pref_key})
        )
        self.assertEqual(404, response.status_code)

    def test_get_preference_does_not_exist(self):
        """
        Test that a 404 is returned if the user does not have a preference with the given preference_key.
        """
        self._set_url("does_not_exist")
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_get(self.client, expected_status=404)
        self.assertIsNone(response.data)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_preference(self, api_client, user):
        """
        Test that a client (logged in) can get her own preferences information. Also verifies that a "is_staff"
        user can get the preferences information for other users.
        """
        client = self.login_client(api_client, user)
        response = self.send_get(client)
        self.assertEqual(self.test_pref_value, response.data)

        # Test a different value.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        self._set_url("dict_pref")
        response = self.send_get(client)
        self.assertEqual("{'int_key': 10}", response.data)

    def test_create_preference(self):
        """
        Test that a client (logged in) can create a preference.
        """
        self._do_create_preference_test(True)

    def test_create_preference_inactive(self):
        """
        Test that a client (logged in but not active) can create a preference.
        """
        self._do_create_preference_test(False)

    def _do_create_preference_test(self, is_active):
        """
        Generalization of the actual test workflow
        """
        self.client.login(username=self.user.username, password=self.test_password)
        if not is_active:
            self.user.is_active = False
            self.user.save()
        self._set_url("new_key")
        new_value = "new value"
        self.send_put(self.client, new_value)
        response = self.send_get(self.client)
        self.assertEqual(new_value, response.data)

    @ddt.data(
        (None,),
        ("",),
        (" ",),
    )
    @ddt.unpack
    def test_create_empty_preference(self, preference_value):
        """
        Test that a client (logged in) cannot create an empty preference.
        """
        self._set_url("new_key")
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_put(self.client, preference_value, expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"Preference 'new_key' cannot be set to an empty value.",
                "user_message": u"Preference 'new_key' cannot be set to an empty value."
            }
        )
        self.send_get(self.client, expected_status=404)

    def test_create_preference_too_long_key(self):
        """
        Test that a client cannot create preferences with bad keys
        """
        self.client.login(username=self.user.username, password=self.test_password)
        new_value = "new value"
        # Reuse the module-level TOO_LONG_PREFERENCE_KEY constant instead of
        # rebuilding the same 256-character key locally.
        self._set_url(TOO_LONG_PREFERENCE_KEY)
        response = self.send_put(self.client, new_value, expected_status=400)
        # assertEqual (not the deprecated assertEquals alias), consistent with
        # the rest of this module.
        self.assertEqual(
            response.data,
            {
                "developer_message": get_expected_validation_developer_message(TOO_LONG_PREFERENCE_KEY, new_value),
                "user_message": get_expected_key_error_user_message(TOO_LONG_PREFERENCE_KEY, new_value),
            }
        )

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_create_preference_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot create a preference for a different user.
        """
        # Verify that a new preference cannot be created
        self._set_url("new_key")
        client = self.login_client(api_client, user)
        new_value = "new value"
        self.send_put(client, new_value, expected_status=403 if user == "staff_user" else 404)

    @ddt.data(
        (u"new value",),
        (10,),
        ({u"int_key": 10},)
    )
    @ddt.unpack
    def test_update_preference(self, preference_value):
        """
        Test that a client (logged in) can update a preference.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.send_put(self.client, preference_value)
        response = self.send_get(self.client)
        self.assertEqual(unicode(preference_value), response.data)

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_update_preference_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot update a preference for another user.
        """
        client = self.login_client(api_client, user)
        new_value = "new value"
        self.send_put(client, new_value, expected_status=403 if user == "staff_user" else 404)

    @ddt.data(
        (None,),
        ("",),
        (" ",),
    )
    @ddt.unpack
    def test_update_preference_to_empty(self, preference_value):
        """
        Test that a client (logged in) cannot update a preference to null.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_put(self.client, preference_value, expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"Preference 'test_key' cannot be set to an empty value.",
                "user_message": u"Preference 'test_key' cannot be set to an empty value."
            }
        )
        response = self.send_get(self.client)
        self.assertEqual(self.test_pref_value, response.data)

    def test_delete_preference(self):
        """
        Test that a client (logged in) can delete her own preference.
        """
        self.client.login(username=self.user.username, password=self.test_password)

        # Verify that a preference can be deleted
        self.send_delete(self.client)
        self.send_get(self.client, expected_status=404)

        # Verify that deleting a non-existent preference throws a 404
        self.send_delete(self.client, expected_status=404)

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_delete_preference_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot delete a preference for another user.
        """
        client = self.login_client(api_client, user)
        self.send_delete(client, expected_status=403 if user == "staff_user" else 404)
| agpl-3.0 |
JeremyNGalloway/portAuthority | portAuthority.py | 2 | 2475 | import subprocess
import datetime
import logging
import sys
from netaddr import IPNetwork
from time import sleep
netcount = 0
ipcount = 0
date = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
logging.basicConfig(filename='example.log',level=logging.DEBUG)
logging.debug('This message should go to the log file')
logging.info('So should this')
logging.warning('And this, too')
if len(sys.argv) < 2:
print 'Missing argument. Please provide a network list file, and optionally a port\n'
print 'e.g. python portAuthority.py networks.txt 8088\n'
print 'nohup python portAuthority.py networks.txt 8088 > foo.out 2> foo.err < /dev/null &'
exit()
try:
zmapBin = subprocess.check_output(['which', 'zmap']).strip()
print 'zmap was found at:\n' + zmapBin
except:
print 'zmap was not found in your PATH, quitting'
exit()
if len(sys.argv) == 3:
port = sys.argv[2]
else:
port = raw_input('Enter port number to scan\ne.g. 8088\n:')
with open(sys.argv[1], 'r') as networksFile:
for network in networksFile:
netcount = netcount +1
ip = IPNetwork(network)
ipcount = ipcount + ip.size
print 'Total number of networks to be scanned: ' + str(netcount)
print 'Total number of IPs to be scanned: ' + ("{:,}".format(ipcount));
if len(sys.argv) < 3:
choice = raw_input('Would you like to continue? Y/n ').lower()
if 'n' in choice:
exit()
with open(sys.argv[1], 'r') as networksFile:
for network in networksFile:
netname = network[0:8].strip('.')
outFile = '{0}.{1}.{2}.zmap'.format(date, port, netname)
zmapOptions = ' -p {0} -o {1} -B 1M -s 53 -v 2 '.format(port, outFile)
fullCommand = 'nohup ' + zmapBin + zmapOptions + network.strip()
print fullCommand + ' RUNNING \n'
try:
subprocess.check_output(fullCommand, shell=True)
time.sleep(8)
except subprocess.CalledProcessError as e:
logging.warning(e.output)
continue
except Exception, e:
logging.warning(e.output)
networksFile.close()
print 'Scanning complete\n'
print 'find *.zmap | grep ' + port + ' | xargs cat | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n | uniq'
exit() | gpl-2.0 |
brennie/reviewboard | reviewboard/oauth/forms.py | 1 | 11912 | """Forms for OAuth2 applications."""
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ValidationError
from django.forms import widgets
from django.utils.translation import ugettext, ugettext_lazy as _
from djblets.forms.widgets import CopyableTextInput, ListEditWidget
from oauth2_provider.generators import (generate_client_id,
generate_client_secret)
from oauth2_provider.validators import URIValidator
from reviewboard.admin.form_widgets import RelatedUserWidget
from reviewboard.oauth.models import Application
from reviewboard.oauth.widgets import OAuthSecretInputWidget
from reviewboard.site.urlresolvers import local_site_reverse
class ApplicationChangeForm(forms.ModelForm):
    """A form for updating an Application.

    This form is intended to be used by the admin site.
    """

    # Shown when a submission tries to re-enable an Application that was
    # disabled for security; it can only be re-enabled after the client
    # secret changes.
    DISABLED_FOR_SECURITY_ERROR = _(
        'This Application has been disabled to keep your server secure. '
        'It cannot be re-enabled until its client secret changes.'
    )

    # Read-only display of the OAuth2 client ID; the real value is generated
    # server-side and excluded from cleaned_data in clean() below.
    client_id = forms.CharField(
        label=_('Client ID'),
        help_text=_(
            'The client ID. Your application will use this in OAuth2 '
            'authentication to identify itself.',
        ),
        widget=CopyableTextInput(attrs={
            'readonly': True,
            'size': 100,
        }),
        required=False,
    )

    def __init__(self, data=None, initial=None, instance=None):
        """Initialize the form:

        Args:
            data (dict, optional):
                The provided form data.

            initial (dict, optional):
                The initial form values.

            instance (Application, optional):
                The application to edit.
        """
        super(ApplicationChangeForm, self).__init__(data=data,
                                                    initial=initial,
                                                    instance=instance)

        if instance and instance.pk:
            # If we are creating an application (as the
            # ApplicationCreationForm is a subclass of this class), the
            # client_secret wont be present so we don't have to initialize the
            # widget.
            client_secret = self.fields['client_secret']
            client_secret.widget = OAuthSecretInputWidget(
                attrs=client_secret.widget.attrs,
                api_url=local_site_reverse('oauth-app-resource',
                                           local_site=instance.local_site,
                                           kwargs={'app_id': instance.pk}),
            )

    def clean_extra_data(self):
        """Prevent ``extra_data`` from being an empty string.

        Returns:
            unicode:
            Either a non-zero length string of JSON-encoded data or ``None``.
        """
        return self.cleaned_data['extra_data'] or None

    def clean_redirect_uris(self):
        """Clean the ``redirect_uris`` field.

        This method will ensure that all the URIs are valid by validating
        each of them, as well as removing unnecessary whitespace.

        Returns:
            unicode:
            A space-separated list of URIs.

        Raises:
            django.core.exceptions.ValidationError:
                Raised when one or more URIs are invalid.
        """
        validator = URIValidator()
        redirect_uris = self.cleaned_data.get('redirect_uris', '').split()
        errors = []

        # Validate each URI individually so all failures are reported at
        # once instead of stopping at the first bad one.
        for uri in redirect_uris:
            try:
                validator(uri)
            except ValidationError as e:
                errors.append(e)

        if errors:
            raise ValidationError(errors)

        # We join the list instead of returning the initial value because the
        # the original value may have had multiple adjacent whitespace
        # characters.
        return ' '.join(redirect_uris)

    def clean(self):
        """Validate the form.

        This will validate the relationship between the
        ``authorization_grant_type`` and ``redirect_uris`` fields to ensure the
        values are compatible.

        This method is very similar to
        :py:func:`Application.clean
        <oauth2_provider.models.AbstractApplication.clean>`, but the data will
        be verified by the form instead of the model to allow error messages to
        be usable by consumers of the form.

        This method does not raise an exception upon failing validation.
        Instead, it sets errors internally so that they are related to the
        pertinent field instead of the form as a whole.

        Returns:
            dict:
            The cleaned form data.
        """
        super(ApplicationChangeForm, self).clean()

        grant_type = self.cleaned_data.get('authorization_grant_type')

        # redirect_uris will not be present in cleaned_data if validation
        # failed.
        redirect_uris = self.cleaned_data.get('redirect_uris')

        # Authorization-code and implicit grants require a redirect URI.
        if (redirect_uris is not None and
            len(redirect_uris) == 0 and
            grant_type in (Application.GRANT_AUTHORIZATION_CODE,
                           Application.GRANT_IMPLICIT)):
            # This is unfortunately not publicly exposed in Django 1.6, but it
            # is exposed in later versions (as add_error).
            self._errors['redirect_uris'] = self.error_class([
                ugettext(
                    'The "redirect_uris" field may not be blank when '
                    '"authorization_grant_type" is "%s"'
                )
                % grant_type
            ])

            self.cleaned_data.pop('redirect_uris')

        # A security-disabled Application may not be re-enabled through this
        # form; its client secret must change first.
        if (self.instance and
            self.instance.pk and
            self.instance.is_disabled_for_security and
            self.cleaned_data['enabled']):
            raise ValidationError(self.DISABLED_FOR_SECURITY_ERROR)

        # Client credentials are server-managed; never accept them from the
        # submitted form data.
        if 'client_id' in self.cleaned_data:
            del self.cleaned_data['client_id']

        if 'client_secret' in self.cleaned_data:
            del self.cleaned_data['client_secret']

        return self.cleaned_data

    class Meta:
        model = Application
        fields = '__all__'
        help_texts = {
            'authorization_grant_type': _(
                'How the authorization is granted to the application.'
            ),
            'client_secret': _(
                'The client secret. This should only be known to Review Board '
                'and your application.'
            ),
            'client_type': _(
                "The type of client. Confidential clients must be able to "
                "keep users' passwords secure."
            ),
            'name': _(
                'The application name.'
            ),
            'redirect_uris': _(
                'A list of allowed URIs to redirect to.',
            ),
            'skip_authorization': _(
                'Whether or not users will be prompted for authentication. '
                'This should most likely be unchecked.'
            ),
            'user': _(
                'The user who created the application. The selected user will '
                'be able to change these settings from their account settings.'
            ),
        }

        widgets = {
            'client_secret': CopyableTextInput(attrs={
                'readonly': True,
                'size': 100,
            }),
            'name': widgets.TextInput(attrs={'size': 60}),
            'redirect_uris': ListEditWidget(attrs={'size': 60}, sep=' '),
            'user': RelatedUserWidget(multivalued=False),
            'original_user': RelatedUserWidget(multivalued=False),
        }

        labels = {
            'authorization_grant_type': _('Authorization Grant Type'),
            'client_secret': _('Client Secret'),
            'client_type': _('Client Type'),
            'name': _('Name'),
            'redirect_uris': _('Redirect URIs'),
            'skip_authorization': _('Skip Authorization'),
            'user': _('User'),
        }
class ApplicationCreationForm(ApplicationChangeForm):
    """An admin-site form for creating a new Application.

    Unlike the change form, the OAuth2 client credentials are never edited
    by the user: they are generated server-side when the form is saved.
    """

    def save(self, commit=True):
        """Save the form, generating the client ID and secret.

        Args:
            commit (bool, optional):
                Whether or not the Application should be saved to the
                database.

        Returns:
            reviewboard.oauth.models.Application:
            The created Application.
        """
        app = super(ApplicationCreationForm, self).save(commit=False)

        # Credentials are always generated fresh; both fields are excluded
        # from the editable form fields below.
        app.client_id = generate_client_id()
        app.client_secret = generate_client_secret()

        if commit:
            app.save()

        return app

    class Meta(ApplicationChangeForm.Meta):
        exclude = (
            'client_id',
            'client_secret',
        )
class UserApplicationChangeForm(ApplicationChangeForm):
    """A form allowing an end user to edit their own Application."""

    def __init__(self, user, data=None, initial=None, instance=None):
        """Initialize the form.

        Args:
            user (django.contrib.auth.models.User):
                The user changing the form. Ignored, but included to match
                :py:meth:`UserApplicationCreationForm.__init__`.

            data (dict):
                The provided form data.

            initial (dict, optional):
                The initial form values.

            instance (reviewboard.oauth.models.Application):
                The Application that is to be edited.
        """
        # The user argument is intentionally unused: end users may only
        # reach this form for Applications they already own.
        super(UserApplicationChangeForm, self).__init__(data=data,
                                                        initial=initial,
                                                        instance=instance)

    class Meta(ApplicationChangeForm.Meta):
        exclude = (
            'extra_data',
            'local_site',
            'original_user',
            'skip_authorization',
            'user',
        )
class UserApplicationCreationForm(ApplicationCreationForm):
    """A form allowing an end user to register a new Application."""

    def __init__(self, user, data, initial=None, instance=None):
        """Initialize the form.

        Args:
            user (django.contrib.auth.models.User):
                The user creating the Application. It will be recorded as
                the owner when the form is saved.

            data (dict):
                The provided form data.

            initial (dict, optional):
                The initial form values.

            instance (reviewboard.oauth.models.Application, optional):
                Always ``None``: this form only creates Applications.
        """
        assert instance is None
        super(UserApplicationCreationForm, self).__init__(data=data,
                                                          initial=initial,
                                                          instance=instance)
        self.user = user

    def save(self, commit=True):
        """Save the form, assigning ownership to the creating user.

        Args:
            commit (bool, optional):
                Whether or not the Application should be saved to the
                database.

        Returns:
            reviewboard.oauth.models.Application:
            The created Application.
        """
        app = super(UserApplicationCreationForm, self).save(commit=False)
        app.user = self.user

        if commit:
            app.save()

        return app

    class Meta(ApplicationCreationForm.Meta):
        exclude = (ApplicationCreationForm.Meta.exclude +
                   UserApplicationChangeForm.Meta.exclude)
| mit |
LiuLang/bcloud | bcloud/util.py | 10 | 6706 |
# Copyright (C) 2014-2015 LiuLang <gsushzhsosgsu@gmail.com>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
import base64
import datetime
import hashlib
import json
import os
import random
import re
import traceback
import urllib.parse
import time
from bcloud.const import ValidatePathState
from bcloud.log import logger
try:
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
except (ImportError, ValueError):
logger.error(traceback.format_exc())
# Binary size units used by get_human_size() below.
SIZE_K = 2 ** 10  # 1 KiB
SIZE_M = 2 ** 20  # 1 MiB
SIZE_G = 2 ** 30  # 1 GiB
SIZE_T = 2 ** 40  # 1 TiB
def timestamp():
    """Return the current time as a string of milliseconds since the epoch."""
    millis = int(time.time() * 1000)
    return str(millis)
def curr_time():
    """Return the current local time formatted as ``YYYYmmddHHMMSS``."""
    return datetime.datetime.now().strftime('%Y%m%d%H%M%S')
def latency():
    """Return a pseudo-random latency marker for request tracking.

    The value is a random float in [0, 1) rendered to at most 7 characters
    (e.g. ``0.12345``); the server uses it to track response times.
    """
    value = random.random()
    return str(value)[:7]
def get_human_size(size, use_giga=True):
    """Convert a byte count into a human-readable string.

    Args:
        size: file size in bytes.
        use_giga: when False, never scale above megabytes -- useful when
            displaying a dynamically updating download progress.

    Returns:
        A ``(human_readable, comma_grouped_bytes)`` tuple, e.g.
        ``('1.5 MB', '1,572,864')``.
    """
    grouped = '{0:,}'.format(size)
    if size < SIZE_K:
        return ('{0} B'.format(size), grouped)
    if size < SIZE_M:
        return ('{0:.1f} kB'.format(size / SIZE_K), grouped)
    if size < SIZE_G or not use_giga:
        return ('{0:.1f} MB'.format(size / SIZE_M), grouped)
    if size < SIZE_T:
        return ('{0:.1f} GB'.format(size / SIZE_G), grouped)
    return ('{0:.1f} TB'.format(size / SIZE_T), grouped)
def get_delta_days(from_sec, to_sec):
    """Return the number of whole days between two epoch timestamps."""
    return datetime.timedelta(seconds=abs(to_sec - from_sec)).days
def get_human_time(t):
'''将时间标记转换成字符串'''
if isinstance(t, int):
# ignore micro seconds
if len(str(t)) == 13:
t = t // 1000
t = datetime.datetime.fromtimestamp(t)
return datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
def get_recent_mtime(t):
'''获取更精简的时间.
如果是当天的, 就返回时间; 如果是当年的, 就近回月份和日期; 否则返回完整的时间
'''
if isinstance(t, int):
# ignore micro seconds
if len(str(t)) == 13:
t = t // 1000
t = datetime.datetime.fromtimestamp(t)
now = datetime.datetime.now()
delta = now - t
if delta.days == 0:
return datetime.datetime.strftime(t, '%H:%M:%S')
elif now.year == t.year:
return datetime.datetime.strftime(t, '%b %d')
else:
return datetime.datetime.strftime(t, '%b %d %Y')
def rec_split_path(path):
    """Split an absolute path into (ancestor_path, component) pairs.

    For ``'/a/b'`` this returns ``[('/', '/'), ('/a', 'a'), ('/a/b', 'b')]``,
    ordered from the root down.  A path containing no slash is returned as a
    single-element list holding the string itself.
    """
    if len(path) > 1 and path.endswith('/'):
        path = path[:-1]
    if '/' not in path:
        return [path, ]
    pairs = [('/', '/')]
    tail = []
    while path != '/':
        parent, name = os.path.split(path)
        tail.append((path, name))
        path = parent
    pairs.extend(reversed(tail))
    return pairs
def list_remove_by_index(l, index):
    """Return a copy of ``l`` with the element at ``index`` removed.

    Raises:
        ValueError: if ``index`` is out of range.

    Note:
        The previous implementation mutated the caller's list for the last
        index (via ``pop``) but returned a rebound copy for other indexes;
        it now consistently returns a new list and never mutates ``l``.
    """
    if index < 0 or index >= len(l):
        raise ValueError('index out of range')
    return l[:index] + l[index + 1:]
def uri_to_path(uri):
    """Convert a ``file://`` URI into an unquoted filesystem path.

    Returns ``''`` for empty or too-short input.  The first seven characters
    (the ``file://`` scheme prefix) are stripped, not validated.
    """
    if not uri or len(uri) < 7:
        return ''
    # Strip the 7-char 'file://' prefix, then decode %-escapes.
    return urllib.parse.unquote(uri[7:])
def uris_to_paths(uris):
    """Convert a list of file:// URIs to absolute paths.

    Used to handle file drag-and-drop in the desktop client; URIs that do
    not translate to a non-empty path are silently dropped.
    """
    converted = (uri_to_path(uri) for uri in uris)
    return [path for path in converted if path]
def natsort(string):
    """Return a natural-sort key for *string*.

    Digit runs compare numerically and other runs lexically, so that
    ``'a2' < 'a10'`` -- unlike plain code-point ordering.

    Related links:
    http://stackoverflow.com/questions/2545532/python-analog-of-natsort-function-sort-a-list-using-a-natural-order-algorithm
    http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
    """
    # Raw string for the capture pattern; the original '(\d+)' relied on an
    # invalid escape sequence, deprecated since Python 3.6.
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string)]
def RSA_encrypt(public_key, message):
    """RSA-encrypt *message* with *public_key*.

    Args:
        public_key: the PEM-encoded public key.
        message: the UTF-8 string to encrypt.

    Returns:
        The ciphertext as a base64 string, or ``''`` when the Crypto
        (PyCrypto) module failed to import at module load time.
    """
    # If the Crypto import at the top of the module failed, RSA is absent
    # from the module namespace; degrade to an empty string.
    if not globals().get('RSA'):
        return ''
    rsakey = RSA.importKey(public_key)
    cipher = PKCS1_v1_5.new(rsakey)
    encrypted = cipher.encrypt(message.encode())
    # base64.encodestring() was removed in Python 3.9; encodebytes() is the
    # drop-in replacement (same MIME line wrapping, stripped below).
    return base64.encodebytes(encrypted).decode().replace('\n', '')
def m3u8_to_m3u(pls):
    """Convert an m3u8 playlist (bytes) into a deduplicated m3u string.

    Comment lines are dropped; for each entry the ``start=`` query argument
    is cut out, and entries sharing the same ``src=`` value are kept once.
    """
    lines = ['#EXTM3U']
    seen_srcs = set()
    for entry in pls.decode().split('\n'):
        if not entry or entry.startswith('#'):
            continue
        src = entry[entry.find('src='):]
        if src in seen_srcs:
            continue
        seen_srcs.add(src)
        lines.append(entry[:entry.find('start=')] + src)
    return '\n'.join(lines)
def json_loads_single(s):
    """Parse slightly malformed JSON (single quotes, stray tabs).

    Returns the decoded object, or ``None`` (after logging the traceback)
    when the text still fails to parse.
    """
    normalized = s.replace("'", '"').replace('\t', '')
    try:
        return json.loads(normalized)
    except (ValueError, UnicodeDecodeError):
        logger.error(traceback.format_exc())
        return None
def validate_pathname(filepath):
    """Check a path against the Baidu netdisk naming rules.

    Constraints enforced by the service:
    1. total path length at most 1000 characters;
    2. none of the characters \\ ? | " > < : * may appear;
    3. a component may not begin or end with '.' or whitespace
       (\\r, \\n, \\t, space, \\0, \\x0B).

    Returns a ValidatePathState code; OK (0) means the path is acceptable.
    """
    if filepath == '/':
        return ValidatePathState.OK
    if len(filepath) > 1000:
        return ValidatePathState.LENGTH_ERROR
    forbidden = '\\?|"><:*'
    if any(ch in filepath for ch in forbidden):
        return ValidatePathState.CHAR_ERROR2
    # NOTE(review): rec_split_path() yields (abs_path, name) tuples, so
    # entry[0]/entry[-1] here test whole strings with substring `in`, not
    # single leading/trailing characters -- preserved as-is; confirm the
    # intended semantics upstream.
    edge_chars = '.\r\n\t \0\x0b'
    for entry in rec_split_path(filepath):
        if entry[0] in edge_chars or entry[-1] in edge_chars:
            return ValidatePathState.CHAR_ERROR3
    return ValidatePathState.OK
| gpl-3.0 |
d10genes/gensim | gensim/models/phrases.py | 17 | 10461 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automatically detect common phrases (multiword expressions) from a stream of sentences.
The phrases are collocations (frequently co-occurring tokens). See [1]_ for the
exact formula.
For example, if your input stream (=an iterable, with each value a list of token strings) looks like:
>>> print(list(sentence_stream))
[[u'the', u'mayor', u'of', u'new', u'york', u'was', u'there'],
[u'machine', u'learning', u'can', u'be', u'useful', u'sometimes'],
...,
]
you'd train the detector with:
>>> bigram = Phrases(sentence_stream)
and then transform any sentence (list of token strings) using the standard gensim syntax:
>>> sent = [u'the', u'mayor', u'of', u'new', u'york', u'was', u'there']
>>> print(bigram[sent])
[u'the', u'mayor', u'of', u'new_york', u'was', u'there']
(note `new_york` became a single token). As usual, you can also transform an entire
sentence stream using:
>>> print(list(bigram[any_sentence_stream]))
[[u'the', u'mayor', u'of', u'new_york', u'was', u'there'],
[u'machine_learning', u'can', u'be', u'useful', u'sometimes'],
...,
]
You can also continue updating the collocation counts with new sentences, by:
>>> bigram.add_vocab(new_sentence_stream)
These **phrase streams are meant to be used during text preprocessing, before
converting the resulting tokens into vectors using `Dictionary`**. See the
:mod:`gensim.models.word2vec` module for an example application of using phrase detection.
The detection can also be **run repeatedly**, to get phrases longer than
two tokens (e.g. `new_york_times`):
>>> trigram = Phrases(bigram[sentence_stream])
>>> sent = [u'the', u'new', u'york', u'times', u'is', u'a', u'newspaper']
>>> print(trigram[bigram[sent]])
[u'the', u'new_york_times', u'is', u'a', u'newspaper']
.. [1] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
"""
import sys
import os
import logging
from collections import defaultdict
from six import iteritems, string_types
from gensim import utils, interfaces
logger = logging.getLogger(__name__)
class Phrases(interfaces.TransformationABC):
    """
    Detect phrases, based on collected collocation counts. Adjacent words that appear
    together more frequently than expected are joined together with the `_` character.

    It can be used to generate phrases on the fly, using the `phrases[sentence]`
    and `phrases[corpus]` syntax.

    """
    def __init__(self, sentences=None, min_count=5, threshold=10.0,
                 max_vocab_size=40000000, delimiter=b'_'):
        """
        Initialize the model from an iterable of `sentences`. Each sentence must be
        a list of words (unicode strings) that will be used for training.

        The `sentences` iterable can be simply a list, but for larger corpora,
        consider a generator that streams the sentences directly from disk/network,
        without storing everything in RAM. See :class:`BrownCorpus`,
        :class:`Text8Corpus` or :class:`LineSentence` in the :mod:`gensim.models.word2vec`
        module for such examples.

        `min_count` ignore all words and bigrams with total collected count lower
        than this.

        `threshold` represents a threshold for forming the phrases (higher means
        fewer phrases). A phrase of words `a` and `b` is accepted if
        `(cnt(a, b) - min_count) * N / (cnt(a) * cnt(b)) > threshold`, where `N` is the
        total vocabulary size.

        `max_vocab_size` is the maximum size of the vocabulary. Used to control
        pruning of less common words, to keep memory under control. The default
        of 40M needs about 3.6GB of RAM; increase/decrease `max_vocab_size` depending
        on how much available memory you have.

        `delimiter` is the glue character used to join collocation tokens, and
        should be a byte string (e.g. b'_').

        """
        if min_count <= 0:
            raise ValueError("min_count should be at least 1")
        if threshold <= 0:
            raise ValueError("threshold should be positive")

        self.min_count = min_count
        self.threshold = threshold
        self.max_vocab_size = max_vocab_size
        self.vocab = defaultdict(int)  # mapping between utf8 token => its count
        self.min_reduce = 1  # ignore any tokens with count smaller than this
        self.delimiter = delimiter

        if sentences is not None:
            self.add_vocab(sentences)

    def __str__(self):
        """Get short string representation of this phrase detector."""
        return "%s<%i vocab, min_count=%s, threshold=%s, max_vocab_size=%s>" % (
            self.__class__.__name__, len(self.vocab), self.min_count,
            self.threshold, self.max_vocab_size)

    @staticmethod
    def learn_vocab(sentences, max_vocab_size, delimiter=b'_'):
        """Collect unigram/bigram counts from the `sentences` iterable."""
        sentence_no = -1
        total_words = 0
        logger.info("collecting all words and their counts")
        vocab = defaultdict(int)
        min_reduce = 1
        for sentence_no, sentence in enumerate(sentences):
            if sentence_no % 10000 == 0:
                # Lazy %-style logger args: the message is only rendered when
                # the INFO level is actually enabled (was eager `%` before).
                logger.info("PROGRESS: at sentence #%i, processed %i words and %i word types",
                            sentence_no, total_words, len(vocab))
            sentence = [utils.any2utf8(w) for w in sentence]
            for bigram in zip(sentence, sentence[1:]):
                vocab[bigram[0]] += 1
                vocab[delimiter.join(bigram)] += 1
                total_words += 1

            if sentence:  # add last word skipped by previous loop
                word = sentence[-1]
                vocab[word] += 1

            # Prune rare tokens whenever the vocabulary outgrows its budget.
            if len(vocab) > max_vocab_size:
                utils.prune_vocab(vocab, min_reduce)
                min_reduce += 1

        logger.info("collected %i word types from a corpus of %i words (unigram + bigrams) and %i sentences",
                    len(vocab), total_words, sentence_no + 1)
        return min_reduce, vocab

    def add_vocab(self, sentences):
        """
        Merge the collected counts `vocab` into this phrase detector.

        """
        # uses a separate vocab to collect the token counts from `sentences`.
        # this consumes more RAM than merging new sentences into `self.vocab`
        # directly, but gives the new sentences a fighting chance to collect
        # sufficient counts, before being pruned out by the (large) accummulated
        # counts collected in previous learn_vocab runs.
        min_reduce, vocab = self.learn_vocab(sentences, self.max_vocab_size, self.delimiter)

        logger.info("merging %i counts into %s", len(vocab), self)
        self.min_reduce = max(self.min_reduce, min_reduce)
        for word, count in iteritems(vocab):
            self.vocab[word] += count
        if len(self.vocab) > self.max_vocab_size:
            utils.prune_vocab(self.vocab, self.min_reduce)
            self.min_reduce += 1
        logger.info("merged %s", self)

    def __getitem__(self, sentence):
        """
        Convert the input tokens `sentence` (=list of unicode strings) into phrase
        tokens (=list of unicode strings, where detected phrases are joined by u'_').

        If `sentence` is an entire corpus (iterable of sentences rather than a single
        sentence), return an iterable that converts each of the corpus' sentences
        into phrases on the fly, one after another.

        Example::

          >>> sentences = Text8Corpus(path_to_corpus)
          >>> bigram = Phrases(sentences, min_count=5, threshold=100)
          >>> for sentence in phrases[sentences]:
          ...     print(u' '.join(s))
            he refuted nechaev other anarchists sometimes identified as pacifist anarchists advocated complete
            nonviolence leo_tolstoy

        """
        try:
            is_single = not sentence or isinstance(sentence[0], string_types)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Non-indexable input (e.g. a
            # generator of sentences) is treated as a corpus stream below.
            is_single = False
        if not is_single:
            # if the input is an entire corpus (rather than a single sentence),
            # return an iterable stream.
            return self._apply(sentence)

        s, new_s = [utils.any2utf8(w) for w in sentence], []
        last_bigram = False
        # Hoist attribute lookups out of the scoring loop.
        vocab = self.vocab
        threshold = self.threshold
        delimiter = self.delimiter
        min_count = self.min_count
        for word_a, word_b in zip(s, s[1:]):
            if word_a in vocab and word_b in vocab:
                bigram_word = delimiter.join((word_a, word_b))
                if bigram_word in vocab and not last_bigram:
                    # Collocation score from Mikolov et al. (2013), eq. 6.
                    pa = float(vocab[word_a])
                    pb = float(vocab[word_b])
                    pab = float(vocab[bigram_word])
                    score = (pab - min_count) / pa / pb * len(vocab)
                    if score > threshold:
                        new_s.append(bigram_word)
                        last_bigram = True
                        continue

            if not last_bigram:
                new_s.append(word_a)
            last_bigram = False

        if s:  # add last word skipped by previous loop
            last_token = s[-1]
            if not last_bigram:
                new_s.append(last_token)

        return [utils.to_unicode(w) for w in new_s]
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
    logging.info("running %s" % " ".join(sys.argv))
    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        # No corpus given: print the module docstring as usage help.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    infile = sys.argv[1]
    from gensim.models import Phrases  # for pickle
    from gensim.models.word2vec import Text8Corpus
    sentences = Text8Corpus(infile)
    # test_doc = LineSentence('test/test_data/testcorpus.txt')
    # Train the bigram detector on the corpus, then print each sentence
    # with detected phrases joined by the delimiter.
    bigram = Phrases(sentences, min_count=5, threshold=100)
    for s in bigram[sentences]:
        print(utils.to_utf8(u' '.join(s)))
| gpl-3.0 |
playfulgod/kernel-M865 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require the object file to check as the single argument.  (Python 2 script.)
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary via the environment.
readelf = os.getenv("READELF", "readelf")

# Matches a function header line: "<name>: [0xSTART-0xEND]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind region line carrying its length: "... rlen=N".
rlen_pattern = re.compile(".*rlen=([0-9]+)")

def check_func (func, slots, rlen_sum):
    # Flag functions whose summed unwind-region lengths disagree with the
    # number of instruction slots spanned by the function.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return

num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: validate the previous function, then reset
        # the accumulators for this one.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # IA-64 bundles hold 3 instruction slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the final function (no trailing header triggers it in the loop).
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
TAKEALOT/Diamond | src/diamond/test/testcollector.py | 24 | 1083 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import unittest
import configobj
from diamond.collector import Collector
class BaseCollectorTest(unittest.TestCase):
    """Tests for hostname resolution on the base Collector class."""

    def _make_config(self, default_options):
        """Build a minimal collector config with the given default options."""
        config = configobj.ConfigObj()
        config['server'] = {'collectors_config_path': ''}
        config['collectors'] = {'default': default_options}
        return config

    def test_SetCustomHostname(self):
        config = self._make_config({
            'hostname': 'custom.localhost',
        })
        collector = Collector(config, [])
        self.assertEquals('custom.localhost', collector.get_hostname())

    def test_SetHostnameViaShellCmd(self):
        config = self._make_config({
            'hostname': 'echo custom.localhost',
            'hostname_method': 'shell',
        })
        collector = Collector(config, [])
        self.assertEquals('custom.localhost', collector.get_hostname())
| mit |
frankrousseau/weboob | modules/adecco/module.py | 7 | 17064 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.ordereddict import OrderedDict
from weboob.tools.value import Value
from weboob.capabilities.job import CapJob
from .browser import AdeccoBrowser
from .job import AdeccoJobAdvert
__all__ = ['AdeccoModule']
class AdeccoModule(Module, CapJob):
    """Weboob backend for the adecco.fr job-search website."""

    NAME = 'adecco'
    DESCRIPTION = u'adecco website'
    MAINTAINER = u'Bezleputh'
    EMAIL = 'carton_ben@yahoo.fr'
    VERSION = '1.1'

    BROWSER = AdeccoBrowser

    # Publication-date filter: site value -> human-readable label.
    publicationDate_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
        '000000': u'-- Indifférent --',
        '1': u'Moins de 48 heures',
        '2': u'Moins de 1 semaine',
        '4': u'Moins de 2 semaines',
        '3': u'Moins de 5 semaines',
    }.iteritems())])

    # Contract-type filter.
    type_contract_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
        '000000': u'--Indifferent--',
        '1': u'CDD',
        '2': u'CDI',
        '3': u'Intérim',
        '4': u'Emploi formation',
        '5': u'Emploi saisonnier',
        '6': u'Stage',
        '7': u'Autre',
    }.iteritems())])

    # Place filter.  Keys encode 'sortindex|REGION_n[|DEPARTEMENT_m]' and are
    # decoded by :meth:`decode_choice`.
    places_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
        '100|REGION_0|DEPARTEMENT_0': u'--Indifferent--',
        '101|REGION_1': u'Alsace',
        '102|REGION_1|DEPARTEMENT_1': u'-- Rhin (Bas) (67)',
        '103|REGION_1|DEPARTEMENT_2': u'-- Rhin (Haut) (68)',
        '104|REGION_2': u'Aquitaine',
        '105|REGION_2|DEPARTEMENT_3': u'-- Dordogne (24)',
        '106|REGION_2|DEPARTEMENT_4': u'-- Gironde (33)',
        '107|REGION_2|DEPARTEMENT_5': u'-- Landes (40)',
        '108|REGION_2|DEPARTEMENT_6': u'-- Lot et Garonne (47)',
        '109|REGION_2|DEPARTEMENT_7': u'-- Pyrénées Atlantiques (64)',
        '110|REGION_3': u'Auvergne',
        '111|REGION_3|DEPARTEMENT_8': u'-- Allier (03)',
        '112|REGION_3|DEPARTEMENT_9': u'-- Cantal (15)',
        '113|REGION_3|DEPARTEMENT_10': u'-- Loire (Haute) (43)',
        '114|REGION_3|DEPARTEMENT_11': u'-- Puy de Dôme (63)',
        '115|REGION_5': u'Bourgogne',
        '116|REGION_5|DEPARTEMENT_15': u'-- Côte d\'Or (21)',
        '117|REGION_5|DEPARTEMENT_16': u'-- Nièvre (58)',
        '118|REGION_5|DEPARTEMENT_17': u'-- Saône et Loire (71)',
        '119|REGION_5|DEPARTEMENT_18': u'-- Yonne (89)',
        '120|REGION_6': u'Bretagne',
        '121|REGION_6|DEPARTEMENT_19': u'-- Côtes d\'Armor (22)',
        '122|REGION_6|DEPARTEMENT_20': u'-- Finistère (29)',
        '123|REGION_6|DEPARTEMENT_21': u'-- Ille et Vilaine (35)',
        '124|REGION_6|DEPARTEMENT_22': u'-- Morbihan (56)',
        '125|REGION_7': u'Centre',
        '126|REGION_7|DEPARTEMENT_23': u'-- Cher (18)',
        '127|REGION_7|DEPARTEMENT_24': u'-- Eure et Loir (28)',
        '128|REGION_7|DEPARTEMENT_25': u'-- Indre (36)',
        '129|REGION_7|DEPARTEMENT_26': u'-- Indre et Loire (37)',
        '130|REGION_7|DEPARTEMENT_27': u'-- Loir et Cher (41)',
        '131|REGION_7|DEPARTEMENT_28': u'-- Loiret (45)',
        '132|REGION_8': u'Champagne Ardenne',
        '133|REGION_8|DEPARTEMENT_29': u'-- Ardennes (08)',
        '134|REGION_8|DEPARTEMENT_30': u'-- Aube (10)',
        '135|REGION_8|DEPARTEMENT_31': u'-- Marne (51)',
        '136|REGION_8|DEPARTEMENT_32': u'-- Marne (Haute) (52)',
        '137|REGION_9': u'Corse',
        '138|REGION_9|DEPARTEMENT_33': u'-- Corse du Sud (2A)',
        '139|REGION_9|DEPARTEMENT_34': u'-- Haute Corse (2B)',
        '140|REGION_11': u'Franche Comté',
        '141|REGION_11|DEPARTEMENT_43': u'-- Belfort (Territoire de) (90)',
        '142|REGION_11|DEPARTEMENT_40': u'-- Doubs (25)',
        '143|REGION_11|DEPARTEMENT_41': u'-- Jura (39)',
        '144|REGION_11|DEPARTEMENT_42': u'-- Saône (Haute) (70)',
        '145|REGION_13': u'Ile de France',
        '146|REGION_13|DEPARTEMENT_49': u'-- Essonne (91)',
        '147|REGION_13|DEPARTEMENT_50': u'-- Hauts de Seine (92)',
        '148|REGION_13|DEPARTEMENT_46': u'-- Paris (Dept.) (75)',
        '149|REGION_13|DEPARTEMENT_51': u'-- Seine Saint Denis (93)',
        '150|REGION_13|DEPARTEMENT_47': u'-- Seine et Marne (77)',
        '151|REGION_13|DEPARTEMENT_53': u'-- Val d\'Oise (95)',
        '152|REGION_13|DEPARTEMENT_52': u'-- Val de Marne (94)',
        '153|REGION_13|DEPARTEMENT_48': u'-- Yvelines (78)',
        '154|REGION_14': u'Languedoc Roussillon',
        '155|REGION_14|DEPARTEMENT_54': u'-- Aude (11)',
        '156|REGION_14|DEPARTEMENT_55': u'-- Gard (30)',
        '157|REGION_14|DEPARTEMENT_56': u'-- Hérault (34)',
        '158|REGION_14|DEPARTEMENT_57': u'-- Lozère (48)',
        '159|REGION_14|DEPARTEMENT_58': u'-- Pyrénées Orientales (66)',
        '160|REGION_15': u'Limousin',
        '161|REGION_15|DEPARTEMENT_59': u'-- Corrèze (19)',
        '162|REGION_15|DEPARTEMENT_60': u'-- Creuse (23)',
        '163|REGION_15|DEPARTEMENT_61': u'-- Vienne (Haute) (87)',
        '164|REGION_16': u'Lorraine',
        '165|REGION_16|DEPARTEMENT_62': u'-- Meurthe et Moselle (54)',
        '166|REGION_16|DEPARTEMENT_63': u'-- Meuse (55)',
        '167|REGION_16|DEPARTEMENT_64': u'-- Moselle (57)',
        '168|REGION_16|DEPARTEMENT_65': u'-- Vosges (88)',
        '169|REGION_17': u'Midi Pyrénées',
        '170|REGION_17|DEPARTEMENT_66': u'-- Ariège (09)',
        '171|REGION_17|DEPARTEMENT_67': u'-- Aveyron (12)',
        '172|REGION_17|DEPARTEMENT_68': u'-- Garonne (Haute) (31)',
        '173|REGION_17|DEPARTEMENT_69': u'-- Gers (32)',
        '174|REGION_17|DEPARTEMENT_70': u'-- Lot (46)',
        '175|REGION_17|DEPARTEMENT_71': u'-- Pyrénées (Hautes) (65)',
        '176|REGION_17|DEPARTEMENT_72': u'-- Tarn (81)',
        '177|REGION_17|DEPARTEMENT_73': u'-- Tarn et Garonne (82)',
        '178|REGION_18': u'Nord Pas de Calais',
        '179|REGION_18|DEPARTEMENT_74': u'-- Nord (59)',
        '180|REGION_18|DEPARTEMENT_75': u'-- Pas de Calais (62)',
        '181|REGION_4': u'Normandie (Basse)',
        '182|REGION_4|DEPARTEMENT_12': u'-- Calvados (14)',
        '183|REGION_4|DEPARTEMENT_13': u'-- Manche (50)',
        '184|REGION_4|DEPARTEMENT_14': u'-- Orne (61)',
        '185|REGION_12': u'Normandie (Haute)',
        '186|REGION_12|DEPARTEMENT_44': u'-- Eure (27)',
        # NOTE(review): DEPARTEMENT_47 is also used for Seine et Marne under
        # REGION_13 above -- looks like an upstream data slip; verify against
        # the site before changing.
        '187|REGION_12|DEPARTEMENT_47': u'-- Seine Maritime (76)',
        '188|REGION_19': u'Pays de la Loire',
        '189|REGION_19|DEPARTEMENT_76': u'-- Loire Atlantique (44)',
        '190|REGION_19|DEPARTEMENT_77': u'-- Maine et Loire (49)',
        '191|REGION_19|DEPARTEMENT_78': u'-- Mayenne (53)',
        '192|REGION_19|DEPARTEMENT_79': u'-- Sarthe (72)',
        '193|REGION_19|DEPARTEMENT_80': u'-- Vendée (85)',
        '194|REGION_20': u'Picardie',
        '195|REGION_20|DEPARTEMENT_81': u'-- Aisne (02)',
        '196|REGION_20|DEPARTEMENT_83': u'-- Oise (60)',
        '197|REGION_20|DEPARTEMENT_84': u'-- Somme (80)',
        '198|REGION_21': u'Poitou Charentes',
        '199|REGION_21|DEPARTEMENT_85': u'-- Charente (16)',
        '200|REGION_21|DEPARTEMENT_86': u'-- Charente Maritime (17)',
        '201|REGION_21|DEPARTEMENT_87': u'-- Sèvres (Deux) (79)',
        '202|REGION_21|DEPARTEMENT_88': u'-- Vienne (86)',
        '203|REGION_22': u'Provence Alpes Côte d\'Azur',
        '204|REGION_22|DEPARTEMENT_90': u'-- Alpes (Hautes) (05)',
        '205|REGION_22|DEPARTEMENT_91': u'-- Alpes Maritimes (06)',
        '206|REGION_22|DEPARTEMENT_89': u'-- Alpes de Haute Provence (04)',
        '207|REGION_22|DEPARTEMENT_92': u'-- Bouches du Rhône (13)',
        '208|REGION_22|DEPARTEMENT_93': u'-- Var (83)',
        '209|REGION_22|DEPARTEMENT_94': u'-- Vaucluse (84)',
        '210|REGION_23': u'Rhône Alpes',
        '211|REGION_23|DEPARTEMENT_95': u'-- Ain (01)',
        '212|REGION_23|DEPARTEMENT_96': u'-- Ardèche (07)',
        '213|REGION_23|DEPARTEMENT_97': u'-- Drôme (26)',
        '214|REGION_23|DEPARTEMENT_98': u'-- Isère (38)',
        '215|REGION_23|DEPARTEMENT_99': u'-- Loire (42)',
        '216|REGION_23|DEPARTEMENT_100': u'-- Rhône (69)',
        '217|REGION_23|DEPARTEMENT_101': u'-- Savoie (73)',
        '218|REGION_23|DEPARTEMENT_102': u'-- Savoie (Haute) (74)',
        '219|REGION_10': u'DOM TOM',
        '220|REGION_10|DEPARTEMENT_35': u'-- Guadeloupe (971)',
        '221|REGION_10|DEPARTEMENT_37': u'-- Guyane (973)',
        '222|REGION_10|DEPARTEMENT_38': u'-- La Réunion (974)',
        '223|REGION_10|DEPARTEMENT_36': u'-- Martinique (972)',
        '224|REGION_10|DEPARTEMENT_108': u'-- Mayotte (976)',
        '225|REGION_10|DEPARTEMENT_109': u'-- Nouvelle Calédonie (988)',
        '226|REGION_10|DEPARTEMENT_108': u'-- Polynésie (987)',
        '227|REGION_10|DEPARTEMENT_107': u'-- Saint Pierre et Miquelon (975)',
        '228|REGION_24': u'International',
        '229|REGION_24|DEPARTEMENT_104': u'-- Andorre',
        '230|REGION_24|DEPARTEMENT_105': u'-- Monaco',
        '231|REGION_24|DEPARTEMENT_106': u'-- Suisse',
    }.iteritems())])

    # Activity-domain filter.  Keys encode 'sortindex|DOMAIN_n[|ACTIVITY_m]'.
    activityDomain_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
        '100|DOMAIN_0': u'Tous domaines d\'activité',
        '101|DOMAIN_1': u'Accueil - Secrétariat - Fonctions Administratives',
        '102|DOMAIN_1|ACTIVITY_1': u'-- Accueil',
        '103|DOMAIN_1|ACTIVITY_2': u'-- Secrétariat - Assistanat',
        '104|DOMAIN_1|ACTIVITY_3': u'-- Autres Fonctions Administratives',
        '105|DOMAIN_2': u'Achats - Juridique - Qualité - RH - Direction',
        '106|DOMAIN_2|ACTIVITY_4': u'-- Achats ',
        '107|DOMAIN_2|ACTIVITY_5': u'-- Juridique',
        '108|DOMAIN_2|ACTIVITY_6': u'-- Qualité',
        '109|DOMAIN_2|ACTIVITY_7': u'Ressources Humaines - Formation',
        '110|DOMAIN_2|ACTIVITY_8': u'-- Direction Générale',
        '111|DOMAIN_3': u'Agriculture - Viticulture - Pêche - Espaces Verts',
        '112|DOMAIN_3|ACTIVITY_9': u'-- Agriculture - Viticulture - Pêche ',
        '113|DOMAIN_3|ACTIVITY_10': u'-- Espaces Verts - Exploitation Forestière',
        '114|DOMAIN_4': u'Automobile',
        '115|DOMAIN_5': u'Banque - Finance - Gestion Comptabilité - Assurance',
        '116|DOMAIN_5|ACTIVITY_11': u'-- Banque - Finance ',
        '117|DOMAIN_5|ACTIVITY_12': u'-- Gestion - Comptabilité',
        '118|DOMAIN_5|ACTIVITY_13': u'-- Assurance',
        '119|DOMAIN_6': u'Bâtiment - Travaux Publics - Architecture - Immobilier',
        '120|DOMAIN_6|ACTIVITY_14': u'-- Bâtiment - Travaux Publics',
        '121|DOMAIN_6|ACTIVITY_15': u'-- Architecture - Immobilier ',
        '122|DOMAIN_13': u'Bureaux d\'Etudes - Méthodes',
        '123|DOMAIN_8': u'Commerce - Vente - Grande Distribution',
        '124|DOMAIN_8|ACTIVITY_20': u'-- Commerce - Vente',
        '125|DOMAIN_8|ACTIVITY_21': u'-- Grande et Moyenne Distribution',
        '126|DOMAIN_9': u'Environnement - Nettoyage - Sécurité',
        '127|DOMAIN_9|ACTIVITY_22': u'-- Environnement - HSE - Développement durable',
        '128|DOMAIN_9|ACTIVITY_23': u'-- Nettoyage - Assainissement - Pressing',
        '129|DOMAIN_9|ACTIVITY_24': u'-- Sécurité - Premiers secours',
        '130|DOMAIN_10': u'Hôtellerie - Restauration - Métiers de Bouche',
        '131|DOMAIN_10|ACTIVITY_25': u'-- Hôtellerie',
        '132|DOMAIN_10|ACTIVITY_27': u'-- Métiers de bouche',
        '133|DOMAIN_10|ACTIVITY_26': u'-- Restauration',
        '134|DOMAIN_11': u'Industrie',
        '135|DOMAIN_11|ACTIVITY_32': u'-- Aéronautique - Navale',
        '136|DOMAIN_11|ACTIVITY_33': u'-- Agroalimentaire',
        '137|DOMAIN_11|ACTIVITY_58': u'-- Chimie - Pétrochimie',
        '138|DOMAIN_11|ACTIVITY_28': u'-- Electricité - Electronique - Automatisme',
        '139|DOMAIN_11|ACTIVITY_29': u'-- Maintenance - Entretien - SAV ',
        '140|DOMAIN_11|ACTIVITY_30': u'-- Mécanique Générale',
        '141|DOMAIN_11|ACTIVITY_31': u'-- Production - Fabrication ',
        '142|DOMAIN_11|ACTIVITY_36': u'-- Sidérurgie - Métallurgie - Tuyauterie - Soudure',
        '143|DOMAIN_11|ACTIVITY_34': u'-- Nucléaire - Production d\'énergie',
        '144|DOMAIN_11|ACTIVITY_35': u'-- Plasturgie - Bois - Papier - Verre - Cuir - Textile',
        '145|DOMAIN_12': u'Informatique - Technologie de l\'Information',
        '146|DOMAIN_12|ACTIVITY_37': u'-- Direction informatique encadrement',
        '147|DOMAIN_12|ACTIVITY_38': u'-- Etude et développement',
        '148|DOMAIN_12|ACTIVITY_39': u'-- Exploitation, maintenance et support ',
        '149|DOMAIN_12|ACTIVITY_40': u'-- Systèmes et réseaux informatique et télécom',
        '150|DOMAIN_14': u'Logistique - Manutention - Transport',
        '151|DOMAIN_14|ACTIVITY_42': u'-- Conduite de véhicule',
        '152|DOMAIN_14|ACTIVITY_43': u'-- Exploitation de logistique - supply chain',
        '153|DOMAIN_14|ACTIVITY_44': u'-- Manutention',
        '154|DOMAIN_14|ACTIVITY_45': u'-- Transport',
        '155|DOMAIN_15': u'Marketing - Communication - Imprimerie - Edition',
        '156|DOMAIN_15|ACTIVITY_47': u'-- Imprimerie - Edition - Arts Graphiques',
        '157|DOMAIN_15|ACTIVITY_46': u'-- Marketing - Communication - Medias',
        '158|DOMAIN_16': u'Médical - Paramédical - Esthétique',
        '159|DOMAIN_16|ACTIVITY_59': u'-- Commerce Appareillage',
        '160|DOMAIN_16|ACTIVITY_50': u'-- Directions, Cadres et Enseignement',
        '161|DOMAIN_16|ACTIVITY_49': u'-- Rééducation, Radiologie, Appareillage, LAM',
        '162|DOMAIN_16|ACTIVITY_51': u'-- Secrétariat, Dentaire, Social, Esthétique et Autres',
        '163|DOMAIN_16|ACTIVITY_48': u'-- Soignants - Auxiliaires',
        '164|DOMAIN_7': u'Pharmacie (Industrie, Officine) - Recherche clinique',
        '165|DOMAIN_7|ACTIVITY_16': u'-- Industrie Pharmaceutique / Cosmétologique - Biotech',
        '166|DOMAIN_7|ACTIVITY_17': u'-- Recherche Clinique',
        '167|DOMAIN_7|ACTIVITY_18': u'-- Pharmacie Officine / Hospit / Para-pharmacie',
        '168|DOMAIN_7|ACTIVITY_19': u'-- Vente, information et promotion du médicament',
        '169|DOMAIN_17': u'Télémarketing - Téléservices',
        '170|DOMAIN_17|ACTIVITY_52': u'-- Téléconseil - Télévente - Autres',
        '171|DOMAIN_17|ACTIVITY_53': u'-- Direction, Encadrement',
        '172|DOMAIN_18': u'Tourisme - Loisirs - Spectacle - Audiovisuel',
        '173|DOMAIN_18|ACTIVITY_54': u'-- Tourisme - Loisirs',
        '174|DOMAIN_18|ACTIVITY_55': u'-- Spectacle - Audiovisuel',
    }.iteritems())])

    # BUG FIX: the 'contract' Value used 'labe=' instead of 'label=', so the
    # contract choice was created without its caption.
    CONFIG = BackendConfig(Value('publication_date', label=u'Publication Date', choices=publicationDate_choices),
                           Value('place', label=u'Place', choices=places_choices),
                           Value('contract', label=u'Contract type', choices=type_contract_choices),
                           Value('activity_domain', label=u'Activity Domain', choices=activityDomain_choices),
                           )

    def search_job(self, pattern=None):
        """Yield job adverts matching *pattern* (plain keyword search)."""
        with self.browser:
            for advert in self.browser.search_job(pattern):
                yield advert

    def decode_choice(self, place):
        """Decode a composite choice key such as '105|REGION_2|DEPARTEMENT_3'.

        Returns the numeric suffixes of the second and third components as a
        tuple; the second element is 0 when the key has no third component.
        """
        splitted_choice = place.split('|')
        part1 = splitted_choice[1].split('_')[1]
        if len(splitted_choice) == 3:
            part2 = splitted_choice[2].split('_')[1]
            return part1, part2
        else:
            return part1, 0

    def advanced_search_job(self):
        """Yield adverts matching the criteria stored in the backend CONFIG."""
        region, departement = self.decode_choice(self.config['place'].get())
        domain, category = self.decode_choice(self.config['activity_domain'].get())
        # NOTE(review): 'conty' is the keyword expected by
        # AdeccoBrowser.advanced_search_job -- do not "fix" the spelling here
        # without changing the browser as well.
        for advert in self.browser.advanced_search_job(publication_date=int(self.config['publication_date'].get()),
                                                       contract_type=int(self.config['contract'].get()),
                                                       conty=departement,
                                                       region=region,
                                                       job_category=category,
                                                       activity_domain=domain
                                                       ):
            yield advert

    def get_job_advert(self, _id, advert=None):
        """Fetch (or complete) the advert identified by *_id*."""
        with self.browser:
            return self.browser.get_job_advert(_id, advert)

    def fill_obj(self, advert, fields):
        """CapJob object filler: complete a partially-loaded advert in place."""
        self.get_job_advert(advert.id, advert)

    OBJECTS = {AdeccoJobAdvert: fill_obj}
| agpl-3.0 |
lemonade512/personal-blog | lib/click/core.py | 136 | 70254 | import errno
import os
import sys
from contextlib import contextmanager
from itertools import repeat
from functools import update_wrapper
from .types import convert_type, IntRange, BOOL
from .utils import make_str, make_default_short_help, echo, get_os_args
from .exceptions import ClickException, UsageError, BadParameter, Abort, \
MissingParameter
from .termui import prompt, confirm
from .formatting import HelpFormatter, join_options
from .parser import OptionParser, split_opt
from .globals import push_context, pop_context
from ._compat import PY2, isidentifier, iteritems
from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
# Sentinel used to detect "no value supplied" in places where ``None`` is
# itself a meaningful value.
_missing = object()

# Metavars rendered in usage lines: one subcommand for plain groups, a
# sequence of subcommands for chained multi commands.
SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
sys.exit(1)
def _check_multicommand(base_command, cmd_name, cmd, register=False):
if not base_command.chain or not isinstance(cmd, MultiCommand):
return
if register:
hint = 'It is not possible to add multi commands as children to ' \
'another multi command that is in chain mode'
else:
hint = 'Found a multi command as subcommand to a multi command ' \
'that is in chain mode. This is not supported'
raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
'added as subcommand but it in itself is a '
'multi command. ("%s" is a %s within a chained '
'%s named "%s"). This restriction was supposed to '
'be lifted in 6.0 but the fix was flawed. This '
'will be fixed in Click 7.0' % (
hint, base_command.name, cmd_name,
cmd_name, cmd.__class__.__name__,
base_command.__class__.__name__,
base_command.name))
def batch(iterable, batch_size):
    """Chunk *iterable* into tuples of length *batch_size*.

    Any trailing remainder shorter than *batch_size* is dropped.
    """
    # zip over batch_size references to the *same* iterator pulls items
    # in groups of batch_size.
    source = iter(iterable)
    return list(zip(*((source,) * batch_size)))
def invoke_param_callback(callback, ctx, param, value):
    """Invoke a parameter callback, adapting legacy two-argument callbacks."""
    code_obj = getattr(callback, '__code__', None)
    # Callables without a code object (C functions, partials) are assumed
    # to take the modern three-argument signature.
    argcount = getattr(code_obj, 'co_argcount', 3)
    if argcount >= 3:
        return callback(ctx, param, value)
    # Legacy pre-2.0 signature (ctx, value): warn, then adapt the call.
    from warnings import warn
    warn(Warning('Invoked legacy parameter callback "%s". The new '
                 'signature for such callbacks starting with '
                 'click 2.0 is (ctx, param, value).'
                 % callback), stacklevel=3)
    return callback(ctx, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
    """Attach the active context (and parameter) to usage errors raised
    inside the managed block, then let them propagate.
    """
    try:
        yield
    except BadParameter as exc:
        if exc.ctx is None:
            exc.ctx = ctx
        if param is not None and exc.param is None:
            exc.param = param
        raise
    except UsageError as exc:
        if exc.ctx is None:
            exc.ctx = ctx
        raise
def iter_params_for_processing(invocation_order, declaration_order):
    """Return *declaration_order* sorted into processing order.

    Eager parameters come first; within each eagerness class, parameters
    follow their position in *invocation_order*, with parameters that were
    never invoked sorting last.
    """
    def processing_key(param):
        try:
            position = invocation_order.index(param)
        except ValueError:
            position = float('inf')
        return (not param.is_eager, position)

    return sorted(declaration_order, key=processing_key)
class Context(object):
    """The context is a special internal object that holds state relevant
    for the script execution at every single level.  It's normally invisible
    to commands unless they opt-in to getting access to it.

    The context is useful as it can pass internal objects around and can
    control special execution features such as reading data from
    environment variables.

    A context can be used as context manager in which case it will call
    :meth:`close` on teardown.

    .. versionadded:: 2.0
       Added the `resilient_parsing`, `help_option_names`,
       `token_normalize_func` parameters.

    .. versionadded:: 3.0
       Added the `allow_extra_args` and `allow_interspersed_args`
       parameters.

    .. versionadded:: 4.0
       Added the `color`, `ignore_unknown_options`, and
       `max_content_width` parameters.

    :param command: the command class for this context.
    :param parent: the parent context.
    :param info_name: the info name for this invocation.  Generally this
                      is the most descriptive name for the script or
                      command.  For the toplevel script it is usually
                      the name of the script, for commands below it it's
                      the name of the script.
    :param obj: an arbitrary object of user data.
    :param auto_envvar_prefix: the prefix to use for automatic environment
                               variables.  If this is `None` then reading
                               from environment variables is disabled.  This
                               does not affect manually set environment
                               variables which are always read.
    :param default_map: a dictionary (like object) with default values
                        for parameters.
    :param terminal_width: the width of the terminal.  The default is
                           inherit from parent context.  If no context
                           defines the terminal width then auto
                           detection will be applied.
    :param max_content_width: the maximum width for content rendered by
                              Click (this currently only affects help
                              pages).  This defaults to 80 characters if
                              not overridden.  In other words: even if the
                              terminal is larger than that, Click will not
                              format things wider than 80 characters by
                              default.  In addition to that, formatters might
                              add some safety mapping on the right.
    :param resilient_parsing: if this flag is enabled then Click will
                              parse without any interactivity or callback
                              invocation.  This is useful for implementing
                              things such as completion support.
    :param allow_extra_args: if this is set to `True` then extra arguments
                             at the end will not raise an error and will be
                             kept on the context.  The default is to inherit
                             from the command.
    :param allow_interspersed_args: if this is set to `False` then options
                                    and arguments cannot be mixed.  The
                                    default is to inherit from the command.
    :param ignore_unknown_options: instructs click to ignore options it does
                                   not know and keeps them for later
                                   processing.
    :param help_option_names: optionally a list of strings that define how
                              the default help parameter is named.  The
                              default is ``['--help']``.
    :param token_normalize_func: an optional function that is used to
                                 normalize tokens (options, choices,
                                 etc.).  This for instance can be used to
                                 implement case insensitive behavior.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.  This is only needed if ANSI
                  codes are used in texts that Click prints which is by
                  default not the case.  This for instance would affect
                  help output.
    """

    def __init__(self, command, parent=None, info_name=None, obj=None,
                 auto_envvar_prefix=None, default_map=None,
                 terminal_width=None, max_content_width=None,
                 resilient_parsing=False, allow_extra_args=None,
                 allow_interspersed_args=None,
                 ignore_unknown_options=None, help_option_names=None,
                 token_normalize_func=None, color=None):
        #: the parent context or `None` if none exists.
        self.parent = parent
        #: the :class:`Command` for this context.
        self.command = command
        #: the descriptive information name
        self.info_name = info_name
        #: the parsed parameters except if the value is hidden in which
        #: case it's not remembered.
        self.params = {}
        #: the leftover arguments.
        self.args = []
        #: protected arguments.  These are arguments that are prepended
        #: to `args` when certain parsing scenarios are encountered but
        #: must be never propagated to another arguments.  This is used
        #: to implement nested parsing.
        self.protected_args = []
        if obj is None and parent is not None:
            obj = parent.obj
        #: the user object stored.
        self.obj = obj
        self._meta = getattr(parent, 'meta', {})

        #: A dictionary (-like object) with defaults for parameters.
        if default_map is None \
           and parent is not None \
           and parent.default_map is not None:
            default_map = parent.default_map.get(info_name)
        self.default_map = default_map

        #: This flag indicates if a subcommand is going to be executed. A
        #: group callback can use this information to figure out if it's
        #: being executed directly or because the execution flow passes
        #: onwards to a subcommand. By default it's None, but it can be
        #: the name of the subcommand to execute.
        #:
        #: If chaining is enabled this will be set to ``'*'`` in case
        #: any commands are executed.  It is however not possible to
        #: figure out which ones.  If you require this knowledge you
        #: should use a :func:`resultcallback`.
        self.invoked_subcommand = None

        if terminal_width is None and parent is not None:
            terminal_width = parent.terminal_width
        #: The width of the terminal (None is autodetection).
        self.terminal_width = terminal_width

        if max_content_width is None and parent is not None:
            max_content_width = parent.max_content_width
        #: The maximum width of formatted content (None implies a sensible
        #: default which is 80 for most things).
        self.max_content_width = max_content_width

        if allow_extra_args is None:
            allow_extra_args = command.allow_extra_args
        #: Indicates if the context allows extra args or if it should
        #: fail on parsing.
        #:
        #: .. versionadded:: 3.0
        self.allow_extra_args = allow_extra_args

        if allow_interspersed_args is None:
            allow_interspersed_args = command.allow_interspersed_args
        #: Indicates if the context allows mixing of arguments and
        #: options or not.
        #:
        #: .. versionadded:: 3.0
        self.allow_interspersed_args = allow_interspersed_args

        if ignore_unknown_options is None:
            ignore_unknown_options = command.ignore_unknown_options
        #: Instructs click to ignore options that a command does not
        #: understand and will store it on the context for later
        #: processing.  This is primarily useful for situations where you
        #: want to call into external programs.  Generally this pattern is
        #: strongly discouraged because it's not possibly to losslessly
        #: forward all arguments.
        #:
        #: .. versionadded:: 4.0
        self.ignore_unknown_options = ignore_unknown_options

        if help_option_names is None:
            if parent is not None:
                help_option_names = parent.help_option_names
            else:
                help_option_names = ['--help']

        #: The names for the help options.
        self.help_option_names = help_option_names

        if token_normalize_func is None and parent is not None:
            token_normalize_func = parent.token_normalize_func

        #: An optional normalization function for tokens.  This is
        #: options, choices, commands etc.
        self.token_normalize_func = token_normalize_func

        #: Indicates if resilient parsing is enabled.  In that case Click
        #: will do its best to not cause any failures.
        self.resilient_parsing = resilient_parsing

        # If there is no envvar prefix yet, but the parent has one and
        # the command on this level has a name, we can expand the envvar
        # prefix automatically.
        if auto_envvar_prefix is None:
            if parent is not None \
               and parent.auto_envvar_prefix is not None and \
               self.info_name is not None:
                auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
                                                self.info_name.upper())
        else:
            # BUG FIX: this used to assign the upper-cased value to
            # ``self.auto_envvar_prefix``, which the unconditional
            # assignment below immediately overwrote with the original
            # (non-uppercased) value.  Normalize the local instead so an
            # explicitly given prefix really is upper-cased.
            auto_envvar_prefix = auto_envvar_prefix.upper()
        self.auto_envvar_prefix = auto_envvar_prefix

        if color is None and parent is not None:
            color = parent.color

        #: Controls if styling output is wanted or not.
        self.color = color

        self._close_callbacks = []
        self._depth = 0

    def __enter__(self):
        self._depth += 1
        push_context(self)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self._depth -= 1
        if self._depth == 0:
            self.close()
        pop_context()

    @contextmanager
    def scope(self, cleanup=True):
        """This helper method can be used with the context object to promote
        it to the current thread local (see :func:`get_current_context`).
        The default behavior of this is to invoke the cleanup functions which
        can be disabled by setting `cleanup` to `False`.  The cleanup
        functions are typically used for things such as closing file handles.

        If the cleanup is intended the context object can also be directly
        used as a context manager.

        Example usage::

            with ctx.scope():
                assert get_current_context() is ctx

        This is equivalent::

            with ctx:
                assert get_current_context() is ctx

        .. versionadded:: 5.0

        :param cleanup: controls if the cleanup functions should be run or
                        not.  The default is to run these functions.  In
                        some situations the context only wants to be
                        temporarily pushed in which case this can be disabled.
                        Nested pushes automatically defer the cleanup.
        """
        if not cleanup:
            self._depth += 1
        try:
            with self as rv:
                yield rv
        finally:
            if not cleanup:
                self._depth -= 1

    @property
    def meta(self):
        """This is a dictionary which is shared with all the contexts
        that are nested.  It exists so that click utiltiies can store some
        state here if they need to.  It is however the responsibility of
        that code to manage this dictionary well.

        The keys are supposed to be unique dotted strings.  For instance
        module paths are a good choice for it.  What is stored in there is
        irrelevant for the operation of click.  However what is important is
        that code that places data here adheres to the general semantics of
        the system.

        Example usage::

            LANG_KEY = __name__ + '.lang'

            def set_language(value):
                ctx = get_current_context()
                ctx.meta[LANG_KEY] = value

            def get_language():
                return get_current_context().meta.get(LANG_KEY, 'en_US')

        .. versionadded:: 5.0
        """
        return self._meta

    def make_formatter(self):
        """Creates the formatter for the help and usage output."""
        return HelpFormatter(width=self.terminal_width,
                             max_width=self.max_content_width)

    def call_on_close(self, f):
        """This decorator remembers a function as callback that should be
        executed when the context tears down.  This is most useful to bind
        resource handling to the script execution.  For instance, file objects
        opened by the :class:`File` type will register their close callbacks
        here.

        :param f: the function to execute on teardown.
        """
        self._close_callbacks.append(f)
        return f

    def close(self):
        """Invokes all close callbacks."""
        for cb in self._close_callbacks:
            cb()
        self._close_callbacks = []

    @property
    def command_path(self):
        """The computed command path.  This is used for the ``usage``
        information on the help page.  It's automatically created by
        combining the info names of the chain of contexts to the root.
        """
        rv = ''
        if self.info_name is not None:
            rv = self.info_name
        if self.parent is not None:
            rv = self.parent.command_path + ' ' + rv
        return rv.lstrip()

    def find_root(self):
        """Finds the outermost context."""
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def find_object(self, object_type):
        """Finds the closest object of a given type."""
        node = self
        while node is not None:
            if isinstance(node.obj, object_type):
                return node.obj
            node = node.parent

    def ensure_object(self, object_type):
        """Like :meth:`find_object` but sets the innermost object to a
        new instance of `object_type` if it does not exist.
        """
        rv = self.find_object(object_type)
        if rv is None:
            self.obj = rv = object_type()
        return rv

    def lookup_default(self, name):
        """Looks up the default for a parameter name.  This by default
        looks into the :attr:`default_map` if available.
        """
        if self.default_map is not None:
            rv = self.default_map.get(name)
            if callable(rv):
                rv = rv()
            return rv

    def fail(self, message):
        """Aborts the execution of the program with a specific error
        message.

        :param message: the error message to fail with.
        """
        raise UsageError(message, self)

    def abort(self):
        """Aborts the script."""
        raise Abort()

    def exit(self, code=0):
        """Exits the application with a given exit code."""
        sys.exit(code)

    def get_usage(self):
        """Helper method to get formatted usage string for the current
        context and command.
        """
        return self.command.get_usage(self)

    def get_help(self):
        """Helper method to get formatted help page for the current
        context and command.
        """
        return self.command.get_help(self)

    def invoke(*args, **kwargs):
        """Invokes a command callback in exactly the way it expects.  There
        are two ways to invoke this method:

        1.  the first argument can be a callback and all other arguments and
            keyword arguments are forwarded directly to the function.
        2.  the first argument is a click command object.  In that case all
            arguments are forwarded as well but proper click parameters
            (options and click arguments) must be keyword arguments and Click
            will fill in defaults.

        Note that before Click 3.2 keyword arguments were not properly filled
        in against the intention of this code and no context was created.  For
        more information about this change and why it was done in a bugfix
        release see :ref:`upgrade-to-3.2`.
        """
        self, callback = args[:2]
        ctx = self

        # It's also possible to invoke another command which might or
        # might not have a callback.  In that case we also fill
        # in defaults and make a new context for this command.
        if isinstance(callback, Command):
            other_cmd = callback
            callback = other_cmd.callback
            ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
            if callback is None:
                raise TypeError('The given command does not have a '
                                'callback that can be invoked.')

            for param in other_cmd.params:
                if param.name not in kwargs and param.expose_value:
                    kwargs[param.name] = param.get_default(ctx)

        args = args[2:]
        with augment_usage_errors(self):
            with ctx:
                return callback(*args, **kwargs)

    def forward(*args, **kwargs):
        """Similar to :meth:`invoke` but fills in default keyword
        arguments from the current context if the other command expects
        it.  This cannot invoke callbacks directly, only other commands.
        """
        self, cmd = args[:2]

        # It's also possible to invoke another command which might or
        # might not have a callback.
        if not isinstance(cmd, Command):
            raise TypeError('Callback is not a command.')

        for param in self.params:
            if param not in kwargs:
                kwargs[param] = self.params[param]

        return self.invoke(cmd, **kwargs)
class BaseCommand(object):
    """The base command implements the minimal API contract of commands.
    Most code will never use this as it does not implement a lot of useful
    functionality but it can act as the direct subclass of alternative
    parsing methods that do not depend on the Click parser.
    For instance, this can be used to bridge Click and other systems like
    argparse or docopt.
    Because base commands do not implement a lot of the API that other
    parts of Click take for granted, they are not supported for all
    operations.  For instance, they cannot be used with the decorators
    usually and they have no built-in callback system.
    .. versionchanged:: 2.0
       Added the `context_settings` parameter.
    :param name: the name of the command to use unless a group overrides it.
    :param context_settings: an optional dictionary with defaults that are
                             passed to the context object.
    """
    #: the default for the :attr:`Context.allow_extra_args` flag.
    allow_extra_args = False
    #: the default for the :attr:`Context.allow_interspersed_args` flag.
    allow_interspersed_args = True
    #: the default for the :attr:`Context.ignore_unknown_options` flag.
    ignore_unknown_options = False
    def __init__(self, name, context_settings=None):
        #: the name the command thinks it has.  Upon registering a command
        #: on a :class:`Group` the group will default the command name
        #: with this information.  You should instead use the
        #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
        self.name = name
        if context_settings is None:
            context_settings = {}
        #: an optional dictionary with defaults passed to the context.
        self.context_settings = context_settings
    def get_usage(self, ctx):
        """Base commands have no parser, hence no usage line to render."""
        raise NotImplementedError('Base commands cannot get usage')
    def get_help(self, ctx):
        """Base commands have no formatting support; subclasses override."""
        raise NotImplementedError('Base commands cannot get help')
    def make_context(self, info_name, args, parent=None, **extra):
        """This function when given an info name and arguments will kick
        off the parsing and create a new :class:`Context`.  It does not
        invoke the actual command callback though.
        :param info_name: the info name for this invocation.  Generally this
                          is the most descriptive name for the script or
                          command.  For the toplevel script it's usually
                          the name of the script, for commands below it it's
                          the name of the script.
        :param args: the arguments to parse as list of strings.
        :param parent: the parent context if available.
        :param extra: extra keyword arguments forwarded to the context
                      constructor.
        """
        # Context settings act as defaults only; explicit keyword
        # arguments passed by the caller win.
        for key, value in iteritems(self.context_settings):
            if key not in extra:
                extra[key] = value
        ctx = Context(self, info_name=info_name, parent=parent, **extra)
        # Parse within the context's scope but keep its resources open so
        # the caller can enter the context again to invoke the command.
        with ctx.scope(cleanup=False):
            self.parse_args(ctx, args)
        return ctx
    def parse_args(self, ctx, args):
        """Given a context and a list of arguments this creates the parser
        and parses the arguments, then modifies the context as necessary.
        This is automatically invoked by :meth:`make_context`.
        """
        raise NotImplementedError('Base commands do not know how to parse '
                                  'arguments.')
    def invoke(self, ctx):
        """Given a context, this invokes the command.  The default
        implementation is raising a not implemented error.
        """
        raise NotImplementedError('Base commands are not invokable by default')
    def main(self, args=None, prog_name=None, complete_var=None,
             standalone_mode=True, **extra):
        """This is the way to invoke a script with all the bells and
        whistles as a command line application.  This will always terminate
        the application after a call.  If this is not wanted, ``SystemExit``
        needs to be caught.
        This method is also available by directly calling the instance of
        a :class:`Command`.
        .. versionadded:: 3.0
           Added the `standalone_mode` flag to control the standalone mode.
        :param args: the arguments that should be used for parsing.  If not
                     provided, ``sys.argv[1:]`` is used.
        :param prog_name: the program name that should be used.  By default
                          the program name is constructed by taking the file
                          name from ``sys.argv[0]``.
        :param complete_var: the environment variable that controls the
                             bash completion support.  The default is
                             ``"_<prog_name>_COMPLETE"`` with prog name in
                             uppercase.
        :param standalone_mode: the default behavior is to invoke the script
                                in standalone mode.  Click will then
                                handle exceptions and convert them into
                                error messages and the function will never
                                return but shut down the interpreter.  If
                                this is set to `False` they will be
                                propagated to the caller and the return
                                value of this function is the return value
                                of :meth:`invoke`.
        :param extra: extra keyword arguments are forwarded to the context
                      constructor.  See :class:`Context` for more information.
        """
        # If we are in Python 3, we will verify that the environment is
        # sane at this point or reject further execution to avoid a
        # broken script.
        if not PY2:
            _verify_python3_env()
        else:
            _check_for_unicode_literals()
        if args is None:
            args = get_os_args()
        else:
            # Copy so that in-place modification during parsing does not
            # mutate the caller's list.
            args = list(args)
        if prog_name is None:
            prog_name = make_str(os.path.basename(
                sys.argv and sys.argv[0] or __file__))
        # Hook for the Bash completion.  This only activates if the Bash
        # completion is actually enabled, otherwise this is quite a fast
        # noop.
        _bashcomplete(self, prog_name, complete_var)
        try:
            try:
                with self.make_context(prog_name, args, **extra) as ctx:
                    rv = self.invoke(ctx)
                    if not standalone_mode:
                        return rv
                    ctx.exit()
            except (EOFError, KeyboardInterrupt):
                # ^C / ^D: print a newline to stderr then abort cleanly.
                echo(file=sys.stderr)
                raise Abort()
            except ClickException as e:
                # In standalone mode usage/user errors become pretty
                # messages plus an exit code instead of tracebacks.
                if not standalone_mode:
                    raise
                e.show()
                sys.exit(e.exit_code)
            except IOError as e:
                # A broken pipe (e.g. output piped into `head`) is not an
                # error worth a traceback; exit quietly with status 1.
                if e.errno == errno.EPIPE:
                    sys.exit(1)
                else:
                    raise
        except Abort:
            if not standalone_mode:
                raise
            echo('Aborted!', file=sys.stderr)
            sys.exit(1)
    def __call__(self, *args, **kwargs):
        """Alias for :meth:`main`."""
        return self.main(*args, **kwargs)
class Command(BaseCommand):
    """Commands are the basic building block of command line interfaces in
    Click.  A basic command handles command line parsing and might dispatch
    more parsing to commands nested below it.
    .. versionchanged:: 2.0
       Added the `context_settings` parameter.
    :param name: the name of the command to use unless a group overrides it.
    :param context_settings: an optional dictionary with defaults that are
                             passed to the context object.
    :param callback: the callback to invoke.  This is optional.
    :param params: the parameters to register with this command.  This can
                   be either :class:`Option` or :class:`Argument` objects.
    :param help: the help string to use for this command.
    :param epilog: like the help string but it's printed at the end of the
                   help page after everything else.
    :param short_help: the short help to use for this command.  This is
                       shown on the command listing of the parent command.
    :param add_help_option: by default each command registers a ``--help``
                            option.  This can be disabled by this parameter.
    """
    def __init__(self, name, context_settings=None, callback=None,
                 params=None, help=None, epilog=None, short_help=None,
                 options_metavar='[OPTIONS]', add_help_option=True):
        BaseCommand.__init__(self, name, context_settings)
        #: the callback to execute when the command fires.  This might be
        #: `None` in which case nothing happens.
        self.callback = callback
        #: the list of parameters for this command in the order they
        #: should show up in the help page and execute.  Eager parameters
        #: will automatically be handled before non eager ones.
        self.params = params or []
        self.help = help
        self.epilog = epilog
        self.options_metavar = options_metavar
        # Derive the short help from the long help when not given.
        if short_help is None and help:
            short_help = make_default_short_help(help)
        self.short_help = short_help
        self.add_help_option = add_help_option
    def get_usage(self, ctx):
        """Formats the usage line into a string and returns it."""
        formatter = ctx.make_formatter()
        self.format_usage(ctx, formatter)
        return formatter.getvalue().rstrip('\n')
    def get_params(self, ctx):
        """Returns the parameters for this command, with the implicit
        help option (if enabled) appended at the end.
        """
        rv = self.params
        help_option = self.get_help_option(ctx)
        if help_option is not None:
            rv = rv + [help_option]
        return rv
    def format_usage(self, ctx, formatter):
        """Writes the usage line into the formatter."""
        pieces = self.collect_usage_pieces(ctx)
        formatter.write_usage(ctx.command_path, ' '.join(pieces))
    def collect_usage_pieces(self, ctx):
        """Returns all the pieces that go into the usage line and returns
        it as a list of strings.
        """
        rv = [self.options_metavar]
        for param in self.get_params(ctx):
            rv.extend(param.get_usage_pieces(ctx))
        return rv
    def get_help_option_names(self, ctx):
        """Returns the names for the help option."""
        all_names = set(ctx.help_option_names)
        # Drop any name already claimed by a user-defined parameter.
        for param in self.params:
            all_names.difference_update(param.opts)
            all_names.difference_update(param.secondary_opts)
        return all_names
    def get_help_option(self, ctx):
        """Returns the help option object."""
        help_options = self.get_help_option_names(ctx)
        if not help_options or not self.add_help_option:
            return
        def show_help(ctx, param, value):
            # Eager callback: print help and exit before other parameters
            # are processed, unless we are only parsing for completion.
            if value and not ctx.resilient_parsing:
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()
        return Option(help_options, is_flag=True,
                      is_eager=True, expose_value=False,
                      callback=show_help,
                      help='Show this message and exit.')
    def make_parser(self, ctx):
        """Creates the underlying option parser for this command."""
        parser = OptionParser(ctx)
        parser.allow_interspersed_args = ctx.allow_interspersed_args
        parser.ignore_unknown_options = ctx.ignore_unknown_options
        for param in self.get_params(ctx):
            param.add_to_parser(parser, ctx)
        return parser
    def get_help(self, ctx):
        """Formats the help into a string and returns it.  This creates a
        formatter and will call into the following formatting methods:
        """
        formatter = ctx.make_formatter()
        self.format_help(ctx, formatter)
        return formatter.getvalue().rstrip('\n')
    def format_help(self, ctx, formatter):
        """Writes the help into the formatter if it exists.
        This calls into the following methods:
        -   :meth:`format_usage`
        -   :meth:`format_help_text`
        -   :meth:`format_options`
        -   :meth:`format_epilog`
        """
        self.format_usage(ctx, formatter)
        self.format_help_text(ctx, formatter)
        self.format_options(ctx, formatter)
        self.format_epilog(ctx, formatter)
    def format_help_text(self, ctx, formatter):
        """Writes the help text to the formatter if it exists."""
        if self.help:
            formatter.write_paragraph()
            with formatter.indentation():
                formatter.write_text(self.help)
    def format_options(self, ctx, formatter):
        """Writes all the options into the formatter if they exist."""
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            with formatter.section('Options'):
                formatter.write_dl(opts)
    def format_epilog(self, ctx, formatter):
        """Writes the epilog into the formatter if it exists."""
        if self.epilog:
            formatter.write_paragraph()
            with formatter.indentation():
                formatter.write_text(self.epilog)
    def parse_args(self, ctx, args):
        """Parses the arguments with the command's parser and feeds every
        parameter (in processing order) its parsed value, storing leftover
        arguments on the context.
        """
        parser = self.make_parser(ctx)
        opts, args, param_order = parser.parse_args(args=args)
        for param in iter_params_for_processing(
                param_order, self.get_params(ctx)):
            value, args = param.handle_parse_result(ctx, opts, args)
        # Leftover positional arguments are an error unless the context
        # explicitly allows extras or we are in resilient parsing mode.
        if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
            ctx.fail('Got unexpected extra argument%s (%s)'
                     % (len(args) != 1 and 's' or '',
                        ' '.join(map(make_str, args))))
        ctx.args = args
        return args
    def invoke(self, ctx):
        """Given a context, this invokes the attached callback (if it exists)
        in the right way.
        """
        if self.callback is not None:
            return ctx.invoke(self.callback, **ctx.params)
class MultiCommand(Command):
    """A multi command is the basic implementation of a command that
    dispatches to subcommands.  The most common version is the
    :class:`Group`.
    :param invoke_without_command: this controls how the multi command itself
                                   is invoked.  By default it's only invoked
                                   if a subcommand is provided.
    :param no_args_is_help: this controls what happens if no arguments are
                            provided.  This option is enabled by default if
                            `invoke_without_command` is disabled or disabled
                            if it's enabled.  If enabled this will add
                            ``--help`` as argument if no arguments are
                            passed.
    :param subcommand_metavar: the string that is used in the documentation
                               to indicate the subcommand place.
    :param chain: if this is set to `True` chaining of multiple subcommands
                  is enabled.  This restricts the form of commands in that
                  they cannot have optional arguments but it allows
                  multiple commands to be chained together.
    :param result_callback: the result callback to attach to this multi
                            command.
    """
    allow_extra_args = True
    allow_interspersed_args = False
    def __init__(self, name=None, invoke_without_command=False,
                 no_args_is_help=None, subcommand_metavar=None,
                 chain=False, result_callback=None, **attrs):
        Command.__init__(self, name, **attrs)
        # By default, show help on bare invocation unless the group is
        # meant to run without a subcommand.
        if no_args_is_help is None:
            no_args_is_help = not invoke_without_command
        self.no_args_is_help = no_args_is_help
        self.invoke_without_command = invoke_without_command
        if subcommand_metavar is None:
            if chain:
                subcommand_metavar = SUBCOMMANDS_METAVAR
            else:
                subcommand_metavar = SUBCOMMAND_METAVAR
        self.subcommand_metavar = subcommand_metavar
        self.chain = chain
        #: The result callback that is stored.  This can be set or
        #: overridden with the :func:`resultcallback` decorator.
        self.result_callback = result_callback
        # Chain mode cannot support optional arguments because argument
        # boundaries between chained commands would become ambiguous.
        if self.chain:
            for param in self.params:
                if isinstance(param, Argument) and not param.required:
                    raise RuntimeError('Multi commands in chain mode cannot '
                                       'have optional arguments.')
    def collect_usage_pieces(self, ctx):
        """Appends the subcommand metavar to the regular usage pieces."""
        rv = Command.collect_usage_pieces(self, ctx)
        rv.append(self.subcommand_metavar)
        return rv
    def format_options(self, ctx, formatter):
        """Writes the options followed by the list of subcommands."""
        Command.format_options(self, ctx, formatter)
        self.format_commands(ctx, formatter)
    def resultcallback(self, replace=False):
        """Adds a result callback to the chain command.  By default if a
        result callback is already registered this will chain them but
        this can be disabled with the `replace` parameter.  The result
        callback is invoked with the return value of the subcommand
        (or the list of return values from all subcommands if chaining
        is enabled) as well as the parameters as they would be passed
        to the main callback.
        Example::
            @click.group()
            @click.option('-i', '--input', default=23)
            def cli(input):
                return 42
            @cli.resultcallback()
            def process_result(result, input):
                return result + input
        .. versionadded:: 3.0
        :param replace: if set to `True` an already existing result
                        callback will be removed.
        """
        def decorator(f):
            old_callback = self.result_callback
            if old_callback is None or replace:
                self.result_callback = f
                return f
            # Chain: feed the old callback's result into the new one while
            # forwarding the original parameters unchanged.
            def function(__value, *args, **kwargs):
                return f(old_callback(__value, *args, **kwargs),
                         *args, **kwargs)
            self.result_callback = rv = update_wrapper(function, f)
            return rv
        return decorator
    def format_commands(self, ctx, formatter):
        """Extra format methods for multi methods that adds all the commands
        after the options.
        """
        rows = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            # What is this, the tool lied about a command.  Ignore it
            if cmd is None:
                continue
            help = cmd.short_help or ''
            rows.append((subcommand, help))
        if rows:
            with formatter.section('Commands'):
                formatter.write_dl(rows)
    def parse_args(self, ctx, args):
        """Parses like a regular command but protects the leftover
        arguments (the subcommand and its args) from further consumption.
        """
        if not args and self.no_args_is_help and not ctx.resilient_parsing:
            echo(ctx.get_help(), color=ctx.color)
            ctx.exit()
        rest = Command.parse_args(self, ctx, args)
        if self.chain:
            ctx.protected_args = rest
            ctx.args = []
        elif rest:
            ctx.protected_args, ctx.args = rest[:1], rest[1:]
        return ctx.args
    def invoke(self, ctx):
        """Invokes the group callback and then the resolved subcommand(s),
        passing the result(s) through the result callback if one is set.
        """
        def _process_result(value):
            if self.result_callback is not None:
                value = ctx.invoke(self.result_callback, value,
                                   **ctx.params)
            return value
        if not ctx.protected_args:
            # If we are invoked without command the chain flag controls
            # how this happens.  If we are not in chain mode, the return
            # value here is the return value of the command.
            # If however we are in chain mode, the return value is the
            # return value of the result processor invoked with an empty
            # list (which means that no subcommand actually was executed).
            if self.invoke_without_command:
                if not self.chain:
                    return Command.invoke(self, ctx)
                with ctx:
                    Command.invoke(self, ctx)
                    return _process_result([])
            ctx.fail('Missing command.')
        # Fetch args back out
        args = ctx.protected_args + ctx.args
        ctx.args = []
        ctx.protected_args = []
        # If we're not in chain mode, we only allow the invocation of a
        # single command but we also inform the current context about the
        # name of the command to invoke.
        if not self.chain:
            # Make sure the context is entered so we do not clean up
            # resources until the result processor has worked.
            with ctx:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                ctx.invoked_subcommand = cmd_name
                Command.invoke(self, ctx)
                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
                with sub_ctx:
                    return _process_result(sub_ctx.command.invoke(sub_ctx))
        # In chain mode we create the contexts step by step, but after the
        # base command has been invoked.  Because at that point we do not
        # know the subcommands yet, the invoked subcommand attribute is
        # set to ``*`` to inform the command that subcommands are executed
        # but nothing else.
        with ctx:
            ctx.invoked_subcommand = args and '*' or None
            Command.invoke(self, ctx)
            # Otherwise we make every single context and invoke them in a
            # chain.  In that case the return value to the result processor
            # is the list of all invoked subcommand's results.
            contexts = []
            while args:
                cmd_name, cmd, args = self.resolve_command(ctx, args)
                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
                                           allow_extra_args=True,
                                           allow_interspersed_args=False)
                contexts.append(sub_ctx)
                # Whatever the subcommand did not consume feeds the next
                # command in the chain.
                args, sub_ctx.args = sub_ctx.args, []
            rv = []
            for sub_ctx in contexts:
                with sub_ctx:
                    rv.append(sub_ctx.command.invoke(sub_ctx))
            return _process_result(rv)
    def resolve_command(self, ctx, args):
        """Maps the first argument to a subcommand, returning the resolved
        name, the command object and the remaining arguments.
        """
        cmd_name = make_str(args[0])
        original_cmd_name = cmd_name
        # Get the command
        cmd = self.get_command(ctx, cmd_name)
        # If we can't find the command but there is a normalization
        # function available, we try with that one.
        if cmd is None and ctx.token_normalize_func is not None:
            cmd_name = ctx.token_normalize_func(cmd_name)
            cmd = self.get_command(ctx, cmd_name)
        # If we don't find the command we want to show an error message
        # to the user that it was not provided.  However, there is
        # something else we should do: if the first argument looks like
        # an option we want to kick off parsing again for arguments to
        # resolve things like --help which now should go to the main
        # place.
        if cmd is None:
            if split_opt(cmd_name)[0]:
                self.parse_args(ctx, ctx.args)
            ctx.fail('No such command "%s".' % original_cmd_name)
        return cmd_name, cmd, args[1:]
    def get_command(self, ctx, cmd_name):
        """Given a context and a command name, this returns a
        :class:`Command` object if it exists or returns `None`.
        """
        raise NotImplementedError()
    def list_commands(self, ctx):
        """Returns a list of subcommand names in the order they should
        appear.
        """
        return []
class Group(MultiCommand):
    """A group allows a command to have subcommands attached.  This is the
    most common way to implement nesting in Click.
    :param commands: a dictionary of commands.
    """
    def __init__(self, name=None, commands=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        #: the registered subcommands by their exported names.
        self.commands = commands or {}
    def add_command(self, cmd, name=None):
        """Registers another :class:`Command` with this group.  If the name
        is not provided, the name of the command is used.
        """
        cmd_name = name or cmd.name
        if cmd_name is None:
            raise TypeError('Command has no name.')
        _check_multicommand(self, cmd_name, cmd, register=True)
        self.commands[cmd_name] = cmd
    def command(self, *args, **kwargs):
        """A shortcut decorator for declaring and attaching a command to
        the group.  This takes the same arguments as :func:`command` but
        immediately registers the created command with this instance by
        calling into :meth:`add_command`.
        """
        def wrap(f):
            # `command` here resolves to the module-level factory, not
            # this method.
            sub = command(*args, **kwargs)(f)
            self.add_command(sub)
            return sub
        return wrap
    def group(self, *args, **kwargs):
        """A shortcut decorator for declaring and attaching a group to
        the group.  This takes the same arguments as :func:`group` but
        immediately registers the created command with this instance by
        calling into :meth:`add_command`.
        """
        def wrap(f):
            # `group` here resolves to the module-level factory.
            sub = group(*args, **kwargs)(f)
            self.add_command(sub)
            return sub
        return wrap
    def get_command(self, ctx, cmd_name):
        """Looks up a registered subcommand; returns `None` when unknown."""
        return self.commands.get(cmd_name)
    def list_commands(self, ctx):
        """Returns the registered subcommand names in sorted order."""
        return sorted(self.commands)
class CommandCollection(MultiCommand):
    """A command collection is a multi command that merges multiple multi
    commands together into one.  This is a straightforward implementation
    that accepts a list of different multi commands as sources and
    provides all the commands for each of them.
    """
    def __init__(self, name=None, sources=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        #: The list of registered multi commands.
        self.sources = sources or []
    def add_source(self, multi_cmd):
        """Adds a new multi command to the chain dispatcher."""
        self.sources.append(multi_cmd)
    def get_command(self, ctx, cmd_name):
        """Returns the command from the first source that knows it, or
        `None` when no source does.
        """
        for source in self.sources:
            found = source.get_command(ctx, cmd_name)
            if found is None:
                continue
            if self.chain:
                _check_multicommand(self, cmd_name, found)
            return found
    def list_commands(self, ctx):
        """Returns the sorted union of the command names of all sources."""
        names = set()
        for source in self.sources:
            names.update(source.list_commands(ctx))
        return sorted(names)
class Parameter(object):
    """A parameter to a command comes in two versions: they are either
    :class:`Option`\s or :class:`Argument`\s.  Other subclasses are currently
    not supported by design as some of the internals for parsing are
    intentionally not finalized.
    Some settings are supported by both options and arguments.
    .. versionchanged:: 2.0
       Changed signature for parameter callback to also be passed the
       parameter.  In Click 2.0, the old callback format will still work,
       but it will raise a warning to give you a chance to migrate the
       code easier.
    :param param_decls: the parameter declarations for this option or
                        argument.  This is a list of flags or argument
                        names.
    :param type: the type that should be used.  Either a :class:`ParamType`
                 or a Python type.  The later is converted into the former
                 automatically if supported.
    :param required: controls if this is optional or not.
    :param default: the default value if omitted.  This can also be a callable,
                    in which case it's invoked when the default is needed
                    without any arguments.
    :param callback: a callback that should be executed after the parameter
                     was matched.  This is called as ``fn(ctx, param,
                     value)`` and needs to return the value.  Before Click
                     2.0, the signature was ``(ctx, value)``.
    :param nargs: the number of arguments to match.  If not ``1`` the return
                  value is a tuple instead of single value.  The default for
                  nargs is ``1`` (except if the type is a tuple, then it's
                  the arity of the tuple).
    :param metavar: how the value is represented in the help page.
    :param expose_value: if this is `True` then the value is passed onwards
                         to the command callback and stored on the context,
                         otherwise it's skipped.
    :param is_eager: eager values are processed before non eager ones.  This
                     should not be set for arguments or it will inverse the
                     order of processing.
    :param envvar: a string or list of strings that are environment variables
                   that should be checked.
    """
    param_type_name = 'parameter'
    def __init__(self, param_decls=None, type=None, required=False,
                 default=None, callback=None, nargs=None, metavar=None,
                 expose_value=True, is_eager=False, envvar=None):
        # Subclasses define how declarations map to a name and opt strings.
        self.name, self.opts, self.secondary_opts = \
            self._parse_decls(param_decls or (), expose_value)
        self.type = convert_type(type, default)
        # Default nargs to what the type tells us if we have that
        # information available.
        if nargs is None:
            if self.type.is_composite:
                nargs = self.type.arity
            else:
                nargs = 1
        self.required = required
        self.callback = callback
        self.nargs = nargs
        # Only options can be multiple; overridden in Option.__init__.
        self.multiple = False
        self.expose_value = expose_value
        self.default = default
        self.is_eager = is_eager
        self.metavar = metavar
        self.envvar = envvar
    @property
    def human_readable_name(self):
        """Returns the human readable name of this parameter.  This is the
        same as the name for options, but the metavar for arguments.
        """
        return self.name
    def make_metavar(self):
        """Returns the metavar to show in help output, deriving one from
        the type name when none was given explicitly.
        """
        if self.metavar is not None:
            return self.metavar
        metavar = self.type.get_metavar(self)
        if metavar is None:
            metavar = self.type.name.upper()
        if self.nargs != 1:
            metavar += '...'
        return metavar
    def get_default(self, ctx):
        """Given a context variable this calculates the default value."""
        # Otherwise go with the regular default.
        if callable(self.default):
            rv = self.default()
        else:
            rv = self.default
        return self.type_cast_value(ctx, rv)
    def add_to_parser(self, parser, ctx):
        """Registers this parameter with the low-level parser.  The base
        implementation does nothing; subclasses override.
        """
        pass
    def consume_value(self, ctx, opts):
        """Picks the raw value: parsed opts first, then the context's
        default map, then the environment.
        """
        value = opts.get(self.name)
        if value is None:
            value = ctx.lookup_default(self.name)
        if value is None:
            value = self.value_from_envvar(ctx)
        return value
    def type_cast_value(self, ctx, value):
        """Given a value this runs it properly through the type system.
        This automatically handles things like `nargs` and `multiple` as
        well as composite types.
        """
        if self.type.is_composite:
            if self.nargs <= 1:
                raise TypeError('Attempted to invoke composite type '
                                'but nargs has been set to %s. This is '
                                'not supported; nargs needs to be set to '
                                'a fixed value > 1.' % self.nargs)
            if self.multiple:
                return tuple(self.type(x or (), self, ctx) for x in value or ())
            return self.type(value or (), self, ctx)
        # Nesting depth: one level for nargs != 1, one more for multiple;
        # recurse through the tuples converting the leaves.
        def _convert(value, level):
            if level == 0:
                return self.type(value, self, ctx)
            return tuple(_convert(x, level - 1) for x in value or ())
        return _convert(value, (self.nargs != 1) + bool(self.multiple))
    def process_value(self, ctx, value):
        """Given a value and context this runs the logic to convert the
        value as necessary.
        """
        # If the value we were given is None we do nothing.  This way
        # code that calls this can easily figure out if something was
        # not provided.  Otherwise it would be converted into an empty
        # tuple for multiple invocations which is inconvenient.
        if value is not None:
            return self.type_cast_value(ctx, value)
    def value_is_missing(self, value):
        """Returns `True` if the value counts as "not provided" (``None``,
        or an empty tuple for multi-value parameters).
        """
        if value is None:
            return True
        if (self.nargs != 1 or self.multiple) and value == ():
            return True
        return False
    def full_process_value(self, ctx, value):
        """Converts the value, falls back to the default when absent and
        raises :exc:`MissingParameter` if required but still missing.
        """
        value = self.process_value(ctx, value)
        if value is None:
            value = self.get_default(ctx)
        if self.required and self.value_is_missing(value):
            raise MissingParameter(ctx=ctx, param=self)
        return value
    def resolve_envvar_value(self, ctx):
        """Returns the raw string from the first set environment variable,
        or `None` when unset / no envvar configured.
        """
        if self.envvar is None:
            return
        if isinstance(self.envvar, (tuple, list)):
            for envvar in self.envvar:
                rv = os.environ.get(envvar)
                if rv is not None:
                    return rv
        else:
            return os.environ.get(self.envvar)
    def value_from_envvar(self, ctx):
        """Returns the environment value, split for multi-value params."""
        rv = self.resolve_envvar_value(ctx)
        if rv is not None and self.nargs != 1:
            rv = self.type.split_envvar_value(rv)
        return rv
    def handle_parse_result(self, ctx, opts, args):
        """Consumes, converts and stores this parameter's value from the
        parse result, invoking the callback if one is registered.
        """
        with augment_usage_errors(ctx, param=self):
            value = self.consume_value(ctx, opts)
            try:
                value = self.full_process_value(ctx, value)
            except Exception:
                # In resilient parsing (e.g. completion) errors are
                # swallowed and the value degrades to None.
                if not ctx.resilient_parsing:
                    raise
                value = None
            if self.callback is not None:
                try:
                    value = invoke_param_callback(
                        self.callback, ctx, self, value)
                except Exception:
                    if not ctx.resilient_parsing:
                        raise
        if self.expose_value:
            ctx.params[self.name] = value
        return value, args
    def get_help_record(self, ctx):
        """Returns the (opts, help) tuple for the help page, or `None`.
        Base parameters render nothing; options override this.
        """
        pass
    def get_usage_pieces(self, ctx):
        """Returns the pieces this parameter adds to the usage line."""
        return []
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: controls if the default value should be shown on the
help page. Normally, defaults are not shown.
:param prompt: if set to `True` or a non empty string then the user will
be prompted for input if not set. If set to `True` the
prompt will be the option name capitalized.
:param confirmation_prompt: if set then the value will need to be confirmed
if it was prompted for.
:param hide_input: if this is `True` then the input on the prompt will be
hidden from the user. This is useful for password
input.
:param is_flag: forces this option to act as a flag. The default is
auto detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
in how it works but supports arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
"""
param_type_name = 'option'
def __init__(self, param_decls=None, show_default=False,
prompt=False, confirmation_prompt=False,
hide_input=False, is_flag=None, flag_value=None,
multiple=False, count=False, allow_from_autoenv=True,
type=None, help=None, **attrs):
default_is_missing = attrs.get('default', _missing) is _missing
Parameter.__init__(self, param_decls, type=type, **attrs)
if prompt is True:
prompt_text = self.name.replace('_', ' ').capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.hide_input = hide_input
# Flags
if is_flag is None:
if flag_value is not None:
is_flag = True
else:
is_flag = bool(self.secondary_opts)
if is_flag and default_is_missing:
self.default = False
if flag_value is None:
flag_value = not self.default
self.is_flag = is_flag
self.flag_value = flag_value
if self.is_flag and isinstance(self.flag_value, bool) \
and type is None:
self.type = BOOL
self.is_bool_flag = True
else:
self.is_bool_flag = False
# Counting
self.count = count
if count:
if type is None:
self.type = IntRange(min=0)
if default_is_missing:
self.default = 0
self.multiple = multiple
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
# Sanity check for stuff we don't support
if __debug__:
if self.nargs < 0:
raise TypeError('Options cannot have nargs < 0')
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError('Cannot prompt for flags that are not bools.')
if not self.is_bool_flag and self.secondary_opts:
raise TypeError('Got secondary option for non boolean flag.')
if self.is_bool_flag and self.hide_input \
and self.prompt is not None:
raise TypeError('Hidden input does not work with boolean '
'flag prompts.')
if self.count:
if self.multiple:
raise TypeError('Options cannot be multiple and count '
'at the same time.')
elif self.is_flag:
raise TypeError('Options cannot be count and flags at '
'the same time.')
def _parse_decls(self, decls, expose_value):
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if isidentifier(decl):
if name is not None:
raise TypeError('Name defined twice')
name = decl
else:
split_char = decl[:1] == '/' and ';' or '/'
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: len(x[0]))
name = possible_names[-1][1].replace('-', '_').lower()
if not isidentifier(name):
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError('Could not determine name for option')
if not opts and not secondary_opts:
raise TypeError('No options defined but a name was passed (%s). '
'Did you mean to declare an argument instead '
'of an option?' % name)
return name, opts, secondary_opts
def add_to_parser(self, parser, ctx):
kwargs = {
'dest': self.name,
'nargs': self.nargs,
'obj': self,
}
if self.multiple:
action = 'append'
elif self.count:
action = 'count'
else:
action = 'store'
if self.is_flag:
kwargs.pop('nargs', None)
if self.is_bool_flag and self.secondary_opts:
parser.add_option(self.opts, action=action + '_const',
const=True, **kwargs)
parser.add_option(self.secondary_opts, action=action +
'_const', const=False, **kwargs)
else:
parser.add_option(self.opts, action=action + '_const',
const=self.flag_value,
**kwargs)
else:
kwargs['action'] = action
parser.add_option(self.opts, **kwargs)
def get_help_record(self, ctx):
any_prefix_is_slash = []
def _write_opts(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not self.is_flag and not self.count:
rv += ' ' + self.make_metavar()
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ''
extra = []
if self.default is not None and self.show_default:
extra.append('default: %s' % (
', '.join('%s' % d for d in self.default)
if isinstance(self.default, (list, tuple))
else self.default, ))
if self.required:
extra.append('required')
if extra:
help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
    def get_default(self, ctx):
        """Return this option's default value within *ctx*."""
        # If we're a non-boolean flag, our default is more complex because
        # we need to look at all flags in the same group to figure out
        # if we're the default one, in which case we return the flag
        # value as default.
        if self.is_flag and not self.is_bool_flag:
            for param in ctx.command.params:
                if param.name == self.name and param.default:
                    return param.flag_value
            return None
        return Parameter.get_default(self, ctx)
    def prompt_for_value(self, ctx):
        """This is an alternative flow that can be activated in the full
        value processing if a value does not exist.  It will prompt the
        user until a valid value exists and then returns the processed
        value as result.
        """
        # Calculate the default before prompting anything to be stable.
        default = self.get_default(ctx)

        # If this is a prompt for a flag we need to handle this
        # differently: a yes/no confirmation instead of free-form input.
        if self.is_bool_flag:
            return confirm(self.prompt, default)

        # value_proc re-runs conversion/validation on each attempt so the
        # user is re-prompted until the input is acceptable.
        return prompt(self.prompt, default=default,
                      hide_input=self.hide_input,
                      confirmation_prompt=self.confirmation_prompt,
                      value_proc=lambda x: self.process_value(ctx, x))
    def resolve_envvar_value(self, ctx):
        """Look up this option's raw value from the environment.

        The explicitly declared envvar (handled by the base class) wins;
        otherwise the auto-generated ``<PREFIX>_<NAME>`` variable is
        consulted.  Falls through to an implicit ``None`` when neither
        applies.
        """
        rv = Parameter.resolve_envvar_value(self, ctx)
        if rv is not None:
            return rv
        if self.allow_from_autoenv and \
                ctx.auto_envvar_prefix is not None:
            envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
            return os.environ.get(envvar)
    def value_from_envvar(self, ctx):
        """Return the environment-supplied value, split and grouped
        according to ``nargs`` and ``multiple``.
        """
        rv = self.resolve_envvar_value(ctx)
        if rv is None:
            return None
        # Nesting depth expected from the raw string: one level when
        # nargs != 1, one more when the option may be given repeatedly.
        value_depth = (self.nargs != 1) + bool(self.multiple)
        if value_depth > 0 and rv is not None:
            rv = self.type.split_envvar_value(rv)
            if self.multiple and self.nargs != 1:
                # Regroup the flat token list into nargs-sized chunks.
                rv = batch(rv, self.nargs)
        return rv
def full_process_value(self, ctx, value):
if value is None and self.prompt is not None \
and not ctx.resilient_parsing:
return self.prompt_for_value(ctx)
return Parameter.full_process_value(self, ctx, value)
class Argument(Parameter):
    """Arguments are positional parameters to a command.  They generally
    provide fewer features than options but can have infinite ``nargs``
    and are required by default.

    All parameters are passed onwards to the parameter constructor.
    """
    param_type_name = 'argument'

    def __init__(self, param_decls, required=None, **attrs):
        # Infer `required` when not given: an argument with a default is
        # optional; otherwise it is required unless it consumes a
        # variable number of values (nargs=-1 makes nargs > 0 false).
        if required is None:
            if attrs.get('default') is not None:
                required = False
            else:
                required = attrs.get('nargs', 1) > 0
        Parameter.__init__(self, param_decls, required=required, **attrs)
        if self.default is not None and self.nargs < 0:
            raise TypeError('nargs=-1 in combination with a default value '
                            'is not supported.')

    @property
    def human_readable_name(self):
        # Name shown in error messages; an explicit metavar wins over
        # the upper-cased parameter name.
        if self.metavar is not None:
            return self.metavar
        return self.name.upper()

    def make_metavar(self):
        """Build the usage-line metavar, e.g. ``[NAME]...`` for an
        optional variadic argument.
        """
        if self.metavar is not None:
            return self.metavar
        var = self.name.upper()
        if not self.required:
            var = '[%s]' % var
        if self.nargs != 1:
            var += '...'
        return var

    def _parse_decls(self, decls, expose_value):
        # Arguments accept either a single declaration (used both as the
        # Python name and the command-line name) or an explicit
        # (name, arg) pair; more than two declarations is an error.
        if not decls:
            if not expose_value:
                return None, [], []
            raise TypeError('Could not determine name for argument')
        if len(decls) == 1:
            name = arg = decls[0]
            name = name.replace('-', '_').lower()
        elif len(decls) == 2:
            name, arg = decls
        else:
            raise TypeError('Arguments take exactly one or two '
                            'parameter declarations, got %d' % len(decls))
        return name, [arg], []

    def get_usage_pieces(self, ctx):
        # Arguments contribute only their metavar to the usage line.
        return [self.make_metavar()]

    def add_to_parser(self, parser, ctx):
        parser.add_argument(dest=self.name, nargs=self.nargs,
                            obj=self)
# Circular dependency between decorators and core
from .decorators import command, group
| mit |
feketemihai/l10n-romania | l10n_ro_siruta/__init__.py | 2 | 1033 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Fekete Mihai <mihai.fekete@forbiom.eu>
# Copyright (C) 2014 FOREST AND BIOMASS SERVICES ROMANIA SA
# (http://www.forbiom.eu).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import siruta
import res_partner
| agpl-3.0 |
40223204/w16b_test | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/util.py | 696 | 9917 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading # we want threading to install it's
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
    # Log at the custom SUBDEBUG level (below DEBUG); no-op until the
    # module logger has been initialised via get_logger()/log_to_stderr().
    if _logger:
        _logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
    # Log at DEBUG level; no-op when the module logger is not initialised.
    if _logger:
        _logger.log(DEBUG, msg, *args)
def info(msg, *args):
    # Log at INFO level; no-op when the module logger is not initialised.
    if _logger:
        _logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
    # Log at the custom SUBWARNING level (between INFO and WARNING);
    # no-op when the module logger is not initialised.
    if _logger:
        _logger.log(SUBWARNING, msg, *args)
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    # Serialise creation under logging's own module lock so two threads
    # cannot both initialise the logger.
    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging
            # Re-register our exit handler so it is moved to the end of
            # atexit's handler list (atexit runs handlers LIFO), placing
            # it relative to logging's shutdown handler.  The fallback
            # branch pokes at the private _exithandlers list on Pythons
            # without atexit.unregister.
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    # NOTE(review): each call attaches another StreamHandler to the same
    # module logger, so calling this repeatedly duplicates output.
    logger.addHandler(handler)

    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    # `logger` and the module-global `_logger` refer to the same object
    # here (get_logger() assigns the global).
    return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    # The directory is created lazily, once per process, and cached on
    # the current process object.  A Finalize handler removes the tree
    # at exit; the negative priority (-100) means it runs after the
    # ordinary (priority >= 0) finalizers.
    if current_process()._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        current_process()._tempdir = tempdir
    return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
    """Invoke every registered after-fork callback as func(obj).

    Entries are processed in key order; the monotonically increasing
    counter in each key preserves registration order.  Exceptions from
    a callback are logged and swallowed so one bad hook cannot prevent
    the others from running.
    """
    for (index, ident, func), obj in sorted(_afterfork_registry.items()):
        try:
            func(obj)
        except Exception as exc:
            info('after forker raised exception %s', exc)
def register_after_fork(obj, func):
    # Schedule func(obj) to run in a child process after fork.  The
    # counter key preserves registration order, and because the registry
    # is a WeakValueDictionary the entry disappears once obj is
    # garbage collected.
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
    '''
    Class which supports object finalization using weakrefs

    The callback runs at most once: either when the tracked object is
    garbage collected (via the weakref callback) or at process exit via
    _run_finalizers(), whichever comes first.
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        assert exitpriority is None or type(exitpriority) is int
        if obj is not None:
            # The weakref invokes self() when obj is collected.
            self._weakref = weakref.ref(obj, self)
        else:
            # With no object to track, the finalizer only makes sense
            # as an exit handler, so a priority is mandatory.
            assert exitpriority is not None
        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        # The counter makes keys unique and orders same-priority
        # finalizers by creation time.
        self._key = (exitpriority, next(_finalizer_counter))
        self._pid = os.getpid()
        _finalizer_registry[self._key] = self

    def __call__(self, wr=None,
                 # Need to bind these locally because the globals can have
                 # been cleared at shutdown
                 _finalizer_registry=_finalizer_registry,
                 sub_debug=sub_debug, getpid=os.getpid):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            if self._pid != getpid():
                # Finalizer was inherited across a fork; only the
                # registering process should run the callback.
                sub_debug('finalizer ignored because different process')
                res = None
            else:
                sub_debug('finalizer calling %s with args %s and kwargs %s',
                          self._callback, self._args, self._kwargs)
                res = self._callback(*self._args, **self._kwargs)
            # Drop all references to break cycles and mark as spent.
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
            return res

    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            pass
        else:
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None

    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            obj = None

        if obj is None:
            return '<Finalize object, dead>'

        x = '<Finalize object, callback=%s' % \
            getattr(self._callback, '__name__', self._callback)
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            # Bug fix: the label was misspelled 'exitprority'.
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'
def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if _finalizer_registry is None:
        # This function may be called after this module's globals are
        # destroyed.  See the _exit_function function in this module for more
        # notes.
        return

    # Keep only entries with a real (non-None) priority that meets the
    # threshold; key[0] is the exit priority.
    if minpriority is None:
        f = lambda p : p[0][0] is not None
    else:
        f = lambda p : p[0][0] is not None and p[0][0] >= minpriority

    items = [x for x in list(_finalizer_registry.items()) if f(x)]
    # reverse=True puts the highest priority first; the per-key counter
    # breaks ties so equal priorities run newest-first.
    items.sort(reverse=True)

    for key, finalizer in items:
        sub_debug('calling %s', finalizer)
        try:
            finalizer()
        except Exception:
            import traceback
            traceback.print_exc()

    if minpriority is None:
        # A full run discards everything, including priority-None
        # finalizers that were never eligible to run here.
        _finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    # _exiting may have been replaced by None during interpreter
    # teardown (module globals cleared), which also counts as exiting.
    return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=active_children,
                   current_process=current_process):
    '''
    atexit handler: run priority >= 0 finalizers, terminate daemons and
    join remaining children, then run the rest of the finalizers.
    '''
    # We hold on to references to functions in the arglist due to the
    # situation described below, where this function is called after this
    # module's globals are destroyed.

    global _exiting

    if not _exiting:
        _exiting = True

        info('process shutting down')
        debug('running all "atexit" finalizers with priority >= 0')
        _run_finalizers(0)

        if current_process() is not None:
            # We check if the current process is None here because if
            # it's None, any call to ``active_children()`` will raise
            # an AttributeError (active_children winds up trying to
            # get attributes from util._current_process).  One
            # situation where this can happen is if someone has
            # manipulated sys.modules, causing this module to be
            # garbage collected.  The destructor for the module type
            # then replaces all values in the module dict with None.
            # For instance, after setuptools runs a test it replaces
            # sys.modules with a copy created earlier.  See issues
            # #9775 and #15881.  Also related: #4106, #9205, and
            # #9207.

            for p in active_children():
                if p._daemonic:
                    info('calling terminate() for daemon %s', p.name)
                    p._popen.terminate()

            for p in active_children():
                info('calling join() for process %s', p.name)
                p.join()

        debug('running the remaining "atexit" finalizers')
        _run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
    # A lock wrapper that is replaced by a fresh, unlocked lock in a
    # child process after fork, so a lock held at fork time cannot
    # deadlock the child.
    def __init__(self):
        self._reset()
        register_after_fork(self, ForkAwareThreadLock._reset)

    def _reset(self):
        # Create a brand new lock and re-export its acquire/release.
        self._lock = threading.Lock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release
class ForkAwareLocal(threading.local):
    # Thread-local storage that is emptied in a child process after fork.
    def __init__(self):
        register_after_fork(self, lambda obj : obj.__dict__.clear())
    def __reduce__(self):
        # Pickles as a fresh, empty instance (state is never transferred).
        return type(self), ()
| agpl-3.0 |
dgwakeman/mne-python | mne/preprocessing/tests/test_ctps.py | 20 | 3124 | # Authors: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from mne.time_frequency import morlet
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_array_equal
from mne.preprocessing.ctps_ import (ctps, _prob_kuiper,
_compute_normalized_phase)
###############################################################################
# Generate testing signal
# Module-level fixture: a single 600-sample trial containing the real
# part of a Morlet wavelet, shifted so the wavelet sits at the centre of
# the time window.  All tests below derive their data from this trial.
tmin = -0.3
sfreq = 1000.  # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)

# Generate times series from Morlet wavelet
single_trial = np.zeros((1, len(times)))
Ws = morlet(sfreq, [3], n_cycles=[1])
single_trial[0][:len(Ws[0])] = np.real(Ws[0])
roll_to = 300 - 265  # shift data to center of time window
single_trial = np.roll(single_trial, roll_to)
# Fixed seed so the statistical assertions below are deterministic.
rng = np.random.RandomState(42)
def get_data(n_trials, j_extent):
    """Generate ground truth and testing data.

    Returns an (n_trials, 3, 600) array with three "channels":
    0) exact copies of the template trial (perfect phase locking),
    1) the template jittered by up to +/- j_extent samples,
    2) pure noise (no phase locking).
    """
    ground_truth = np.tile(single_trial, n_trials)
    my_shape = n_trials, 1, 600
    random_data = rng.random_sample(my_shape)
    # NOTE(review): rng.random_integers is deprecated in modern NumPy in
    # favour of rng.randint; kept as-is because the thresholds asserted
    # in test_ctps depend on this exact draw sequence from the seed.
    rand_ints = rng.random_integers(-j_extent, j_extent, n_trials)
    jittered_data = np.array([np.roll(single_trial, i) for i in rand_ints])
    data = np.concatenate([ground_truth.reshape(my_shape),
                           jittered_data.reshape(my_shape),
                           random_data.reshape(my_shape)], 1)
    assert_true(data.shape == (n_trials, 3, 600))
    return data
# vary extent of jittering --> creates phaselocks at the borders if
# 2 * extent != n_samples
iter_test_ctps = enumerate(zip([400, 400], [150, 300], [0.6, 0.2]))
def test_ctps():
    """ Test basic ctps functionality
    """
    for ii, (n_trials, j_extent, pk_max) in iter_test_ctps:
        data = get_data(n_trials, j_extent)
        ks_dyn, pk_dyn, phase_trial = ctps(data)
        # Feeding pre-normalized phases with is_raw=False must give the
        # same result as feeding raw data.
        data2 = _compute_normalized_phase(data)
        ks_dyn2, pk_dyn2, phase_trial2 = ctps(data2, is_raw=False)
        for a, b in zip([ks_dyn, pk_dyn, phase_trial],
                        [ks_dyn2, pk_dyn2, data2]):
            assert_array_equal(a, b)
            assert_true(a.min() >= 0)
            assert_true(a.max() <= 1)
            assert_true(b.min() >= 0)
            assert_true(b.max() <= 1)

        # test for normalization
        assert_true((pk_dyn.min() > 0.0) or (pk_dyn.max() < 1.0))
        # test shapes
        assert_true(phase_trial.shape == data.shape)
        assert_true(pk_dyn.shape == data.shape[1:])
        # test ground_truth + random + jittered case
        assert_true(pk_dyn[0].max() == 1.0)
        assert_true(len(np.unique(pk_dyn[0])) == 1.0)
        assert_true(pk_dyn[1].max() < pk_max)
        assert_true(pk_dyn[2].max() > 0.3)
        if ii < 1:
            # 4-D input must be rejected.
            assert_raises(ValueError, ctps,
                          data[:, :, :, None])

    assert_true(_prob_kuiper(1.0, 400) == 1.0)
    # test vectorization
    # NOTE(review): both sides of this assertion are the *same* call, so
    # it only checks that the vectorized call runs, not that it matches
    # the scalar result — likely intended to compare against
    # _prob_kuiper(1.0, 400); confirm against upstream.
    assert_array_equal(_prob_kuiper(np.array([1.0, 1.0]), 400),
                       _prob_kuiper(np.array([1.0, 1.0]), 400))
    assert_true(_prob_kuiper(0.1, 400) < 0.1)
| bsd-3-clause |
cpennington/edx-platform | openedx/core/lib/grade_utils.py | 9 | 2509 | """
Helpers functions for grades and scores.
"""
import math
def compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero=False):
    """
    Returns a tuple of:
        1. Whether the 2nd set of scores is higher than the first.
        2. Grade percentage of 1st set of scores.
        3. Grade percentage of 2nd set of scores.

    If ``treat_undefined_as_zero`` is True, a zero ``possible`` value is
    treated as a 0.0 percentage; otherwise a ZeroDivisionError propagates.
    """
    def _percentage(earned, possible):
        # Grade fraction for a single (earned, possible) pair.
        try:
            return float(earned) / float(possible)
        except ZeroDivisionError:
            if not treat_undefined_as_zero:
                raise
            return 0.0

    first_percentage = _percentage(earned1, possible1)
    second_percentage = _percentage(earned2, possible2)
    return second_percentage >= first_percentage, first_percentage, second_percentage
def is_score_higher_or_equal(earned1, possible1, earned2, possible2, treat_undefined_as_zero=False):
    """
    Returns whether the 2nd set of scores is higher than (or equal to)
    the first.

    If ``treat_undefined_as_zero`` is True, a zero ``possible`` value is
    treated as a 0.0 percentage; otherwise a ZeroDivisionError propagates.
    """
    return compare_scores(
        earned1, possible1, earned2, possible2, treat_undefined_as_zero
    )[0]
def round_away_from_zero(number, digits=0):
    """
    Round *number* to *digits* decimal places, resolving exact halves
    away from zero (0.5 -> 1, -0.5 -> -1) rather than toward even as
    Python 3's built-in round() does.

    This keeps student grades consistent with the historical Python 2
    rounding behaviour.
    """
    scale = 10.0 ** digits
    scaled = number * scale
    if scaled < 0:
        rounded = math.ceil(scaled - 0.5)
    else:
        rounded = math.floor(scaled + 0.5)
    return float(rounded) / scale
| agpl-3.0 |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/creative_template_service/get_system_defined_creative_templates.py | 1 | 2005 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all system defined creative templates.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Page through and print every system-defined creative template.

    Args:
        client: an initialized ad_manager.AdManagerClient.
    """
    # Initialize appropriate service.
    creative_template_service = client.GetService(
        'CreativeTemplateService', version='v201808')

    # Create a statement to select creative templates.
    statement = (ad_manager.StatementBuilder(version='v201808')
                 .Where('type = :type')
                 .WithBindVariable('type', 'SYSTEM_DEFINED'))

    # Retrieve a small amount of creative templates at a time, paging
    # through until all creative templates have been retrieved.
    while True:
        response = creative_template_service.getCreativeTemplatesByStatement(
            statement.ToStatement())
        if 'results' in response and len(response['results']):
            for creative_template in response['results']:
                # Print out some information for each creative template.
                print('Creative template with ID "%d" and name "%s" was found.\n' %
                      (creative_template['id'], creative_template['name']))
            statement.offset += statement.limit
        else:
            break

    # Bug fix: this line used the Python 2-only `print` statement while
    # the loop above uses the print() function.  The parenthesized form
    # behaves identically under Python 2 and is valid Python 3.
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 |
ChromiumWebApps/chromium | chrome/test/functional/media/media_jerky.py | 78 | 8162 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Jerkiness performance test for video playback.
Uses jerky tool, (http://go/jerky), to record a jerkiness metric for videos
sensitive to jerkiness.
Jerkiness is defined as a percentage of the average on screen frame time by the
formula below. Where smoothed_frame_time[i] represents a frame's on screen time
plus amortized measurement gap error (time taken to capture each frame).
sqrt(average((avg_frame_time - smoothed_frame_time[i])^2, i=m..n))
------------------------------------------------------------------
avg_frame_time
Currently, only the Linux binaries are checked in for this test since we only
have a Linux performance bot. The current binary is a custom build with some
fixes from veganjerky (http://go/veganjerky) for timing, waiting, and stdout
flushing.
TODO(dalecurtis): Move Jerky tool sources into the Chromium tree.
TODO(dalecurtis): Jerky tool uses a questionable method for determining torn
frames, determine if it is actually worth recording.
"""
import glob
import logging
import os
import re
import subprocess
import tempfile
import pyauto_media
import pyauto
import pyauto_utils
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_jerky.html')
# Path under data path for test files.
_TEST_MEDIA_PATH = os.path.join('pyauto_private', 'media', 'birds')
# Path to Jerky tool executable.
_JERKY_PATH = os.path.join('pyauto_private', 'media', 'tools', 'jerky')
# Regular expression for extracting jerkiness percentage. Sample line:
# using 1:9 title 'test.log (35.36% jerky, 0 teared frames)' lw 2 with lines
_JERKY_LOG_REGEX = re.compile(
r'\((\d{0,3}\.?\d{0,2})% jerky, (\d+) teared frames\)')
# Regular expression for extracting computed fps. Sample line:
# INFO: 33797 us per frame => 29.6 fps.
_JERKY_LOG_FPS_REGEX = re.compile(r' => (\d+\.\d+) fps')
# Minimum and maximum number of iterations for each test. Due to timing issues
# the jerky tool may not always calculate the fps correctly. When invalid
# results are detected, the test is rerun up to the maxium # of times set below.
_JERKY_ITERATIONS_MIN = 3
_JERKY_ITERATIONS_MAX = 10
# The media files used for testing. Each entry represents a tuple of (filename,
# width, height, fps). The width and height are used to create a calibration
# pattern for jerky tool. The fps is used to ensure Jerky tool computed a valid
# result.
_TEST_VIDEOS = [('birds540.webm', 960, 540, 29.9)]
def GetTempFilename():
    """Returns an absolute path to an empty temp file."""
    # mkstemp creates the file and returns an open descriptor; close it
    # immediately since callers only want the path.
    handle, filename = tempfile.mkstemp(prefix='jerky_tmp')
    os.close(handle)
    return filename
class MediaJerkyPerfTest(pyauto.PyUITest):
    """PyAuto test container. See file doc string for more information."""

    def StartJerkyCapture(self):
        """Starts jerky tool in capture mode and waits until its ready to capture.

        Returns:
            A tuple of the jerky process and an absolute path to the capture log.
        """
        jerky_log = GetTempFilename()
        logging.debug('Logging data to %s', jerky_log)
        process = subprocess.Popen(
            [os.path.join(self.DataDir(), _JERKY_PATH),
             'capture', '--log', jerky_log],
            stdout=subprocess.PIPE)

        # Set the jerky tool process to soft-realtime w/ round-robin scheduling.
        subprocess.check_call(['sudo', 'chrt', '-r', '-p', str(process.pid)])

        # Wait for server to start up.  readline() returns '' on EOF, so
        # if the tool exits without printing the ready marker we fail.
        line = True
        while line:
            line = process.stdout.readline()
            if 'Waiting for calibration pattern to disappear' in line:
                return process, jerky_log
        self.fail('Failed to launch Jerky tool.')

    def AnalyzeJerkyCapture(self, jerky_log):
        """Run jerky analyze on the specified log and return various metrics.

        Once analyze has completed, the jerky_log and associated outputs will be
        removed.

        Args:
            jerky_log: Absolute path to the capture log.

        Returns:
            Tuple of fps, jerkiness, and torn frames.
        """
        results_log_base = GetTempFilename()
        process = subprocess.Popen(
            [os.path.join(self.DataDir(), _JERKY_PATH),
             'analyze', '--ref', jerky_log, '--out', results_log_base],
            stdout=subprocess.PIPE)

        # Wait for process to end w/o risking deadlock.
        stdout = process.communicate()[0]
        self.assertEquals(process.returncode, 0)

        # Scrape out the calculated FPS.
        fps_match = None
        for line in stdout.splitlines():
            fps_match = _JERKY_LOG_FPS_REGEX.search(line)
            if fps_match:
                break

        # Open *.error.gnuplot and scrape out jerkiness.
        jerky_match = None
        with open('%s.error.gnuplot' % results_log_base) as results:
            for line in results:
                jerky_match = _JERKY_LOG_REGEX.search(line)
                if jerky_match:
                    break

        # Cleanup all the temp and results files jerky spits out.
        for log in glob.glob('%s*' % results_log_base) + [jerky_log]:
            os.unlink(log)

        if fps_match and jerky_match:
            return (float(fps_match.group(1)), float(jerky_match.group(1)),
                    int(jerky_match.group(2)))
        # Either scrape failed; callers treat the Nones as an invalid run.
        return None, None, None

    def testMediaJerkyPerformance(self):
        """Launches Jerky tool and records jerkiness for HTML5 videos.

        For each video, the test starts up jerky tool then plays until the Jerky
        tool collects enough information.  Next the capture log is analyzed using
        Jerky's analyze command.  If the computed fps matches the expected fps the
        jerkiness metric is recorded.

        The test will run up to _JERKY_ITERATIONS_MAX times in an attempt to get at
        least _JERKY_ITERATIONS_MIN valid values.  The results are recorded under
        the 'jerkiness' variable for graphing on the bots.
        """
        self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))

        # Xvfb on the bots is restricted to 1024x768 at present.  Ensure we're
        # using all of the real estate we can.  Jerky tool needs a clear picture
        # of every frame, so we can't clip the video in any way.
        self.SetWindowDimensions(0, 0, 1024, 768)

        for name, width, height, expected_fps in _TEST_VIDEOS:
            jerkiness = []
            torn_frames = []
            file_url = self.GetFileURLForDataPath(
                os.path.join(_TEST_MEDIA_PATH, name))

            # Initialize the calibration area for Jerky tool.
            self.assertTrue(self.ExecuteJavascript(
                'initializeTest(%d, %d);' % (width, height)))

            runs_left = _JERKY_ITERATIONS_MIN
            runs_total = 0
            while runs_left > 0 and runs_total < _JERKY_ITERATIONS_MAX:
                runs_total += 1
                logging.info('Running Jerky perf test #%d for %s.', runs_total, name)

                # Startup Jerky tool in capture mode.
                jerky_process, jerky_log = self.StartJerkyCapture()

                # Start playback of the test video.
                self.assertTrue(self.ExecuteJavascript("startTest('%s');" % file_url))

                # Wait for jerky tool to finish if it hasn't already.
                self.assertTrue(jerky_process.wait() == 0)

                # Stop playback of the test video so the next run can cleanly find
                # the calibration zone.
                self.assertTrue(self.ExecuteJavascript('stopTest();'))

                # Analyze the results.
                jerky_fps, jerky_percent, jerky_torn_frames = self.AnalyzeJerkyCapture(
                    jerky_log)
                if (jerky_fps is None or jerky_percent is None or
                    jerky_torn_frames is None):
                    logging.error('No metrics recorded for this run.')
                    continue

                # Ensure the results for this run are valid.
                if jerky_fps != expected_fps:
                    logging.error(
                        'Invalid fps detected (actual: %f, expected: %f, jerkiness: %f). '
                        'Discarding results for this run.', jerky_fps, expected_fps,
                        jerky_percent)
                    continue

                jerkiness.append(jerky_percent)
                torn_frames.append(jerky_torn_frames)
                runs_left -= 1

            pyauto_utils.PrintPerfResult('jerkiness', name, jerkiness, '%')
if __name__ == '__main__':
pyauto_media.Main()
| bsd-3-clause |
jessekl/flixr | venv/lib/python2.7/site-packages/Crypto/SelfTest/Util/__init__.py | 116 | 1743 | # -*- coding: utf-8 -*-
#
# SelfTest/Util/__init__.py: Self-test for utility modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for utility modules"""
__revision__ = "$Id$"
import os
def get_tests(config={}):
    """Return the combined list of self-tests for the Util modules.

    Note: the shared default dict is safe here because it is only
    passed along, never mutated.
    """
    modules = []
    if os.name == 'nt':
        from Crypto.SelfTest.Util import test_winrandom
        modules.append(test_winrandom)
    from Crypto.SelfTest.Util import test_number, test_Counter
    modules.extend([test_number, test_Counter])
    tests = []
    for module in modules:
        tests += module.get_tests(config=config)
    return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
urthbound/google-python-exercises | basic/string1.py | 1 | 3701 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', using 'many' when count >= 10."""
    quantity = 'many' if count >= 10 else str(count)
    return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first 2 and last 2 chars of s, e.g. 'spring' -> 'spng'.

    Strings shorter than 2 characters yield the empty string.  A
    2-character string yields its characters doubled ('ab' -> 'abab'),
    per the stated spec; the original `len(s) > 2` check wrongly
    returned '' for length-2 input.
    """
    if len(s) < 2:
        return ''
    return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Return s with every later occurrence of its first char turned to '*'.

    e.g. 'babble' -> 'ba**le'.  Assumes len(s) >= 1 per the spec above.

    Fixes two bugs in the original, which sliced off the last character
    before replacing: a trailing occurrence of the first char was never
    starred ('aa' -> 'aa' instead of 'a*'), and a one-character string
    was duplicated ('a' -> 'aa' instead of 'a').
    """
    return s[0] + s[1:].replace(s[0], '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two chars of each string swapped.

    e.g. mix_up('mix', 'pod') -> 'pox mid'.  Assumes both strings have
    length 2 or more.
    """
    swapped_a = b[:2] + a[2:]
    swapped_b = a[:2] + b[2:]
    return swapped_a + ' ' + swapped_b
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Run the provided sanity checks for each exercise function."""
    # print('...') is valid on both Python 2 (parenthesized print statement)
    # and Python 3; the original bare `print` statements are py2-only.
    print('donuts')
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print('')
    print('both_ends')
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print('')
    print('fix_start')
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print('')
    print('mix_up')
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| apache-2.0 |
BehavioralInsightsTeam/edx-platform | openedx/core/djangoapps/lang_pref/tests/test_middleware.py | 9 | 10069 | """
Tests for lang_pref middleware.
"""
import itertools
import mock
import ddt
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from django.test.client import RequestFactory
from django.http import HttpResponse
from django.contrib.sessions.middleware import SessionMiddleware
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import parse_accept_lang_header
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY, COOKIE_DURATION
from openedx.core.djangoapps.lang_pref.middleware import LanguagePreferenceMiddleware
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference, get_user_preference, delete_user_preference
from student.tests.factories import UserFactory
from student.tests.factories import AnonymousUserFactory
@ddt.ddt
class TestUserPreferenceMiddleware(TestCase):
    """
    Tests to make sure user preferences are getting properly set in the middleware.
    """
    def setUp(self):
        super(TestUserPreferenceMiddleware, self).setUp()
        self.middleware = LanguagePreferenceMiddleware()
        self.session_middleware = SessionMiddleware()
        self.user = UserFactory.create()
        self.anonymous_user = AnonymousUserFactory()
        # Every test starts from a GET request carrying an Arabic
        # Accept-Language header and a session (needed by the middleware).
        self.request = RequestFactory().get('/somewhere')
        self.request.user = self.user
        self.request.META['HTTP_ACCEPT_LANGUAGE'] = 'ar;q=1.0' # pylint: disable=no-member
        self.session_middleware.process_request(self.request)
    # Logging out (request.user becomes anonymous) must not clear the
    # language cookie.
    def test_logout_shouldnt_remove_cookie(self):
        self.middleware.process_request(self.request)
        self.request.user = self.anonymous_user
        response = mock.Mock(spec=HttpResponse)
        self.middleware.process_response(self.request, response)
        response.delete_cookie.assert_not_called()
    @ddt.data(None, 'es', 'en')
    def test_preference_setting_changes_cookie(self, lang_pref_out):
        """
        Test that the LANGUAGE_COOKIE is always set to the user's current language preferences
        at the end of the request, with an expiry that's the same as the users current session cookie.
        """
        # NOTE(review): the docstring mentions session-cookie expiry, but the
        # assertion below checks max_age=COOKIE_DURATION — docstring may be stale.
        if lang_pref_out:
            set_user_preference(self.user, LANGUAGE_KEY, lang_pref_out)
        else:
            delete_user_preference(self.user, LANGUAGE_KEY)
        response = mock.Mock(spec=HttpResponse)
        self.middleware.process_response(self.request, response)
        if lang_pref_out:
            response.set_cookie.assert_called_with(
                settings.LANGUAGE_COOKIE,
                value=lang_pref_out,
                domain=settings.SESSION_COOKIE_DOMAIN,
                max_age=COOKIE_DURATION,
            )
        else:
            response.delete_cookie.assert_called_with(
                settings.LANGUAGE_COOKIE,
                domain=settings.SESSION_COOKIE_DOMAIN,
            )
        self.assertNotIn(LANGUAGE_SESSION_KEY, self.request.session)
    @ddt.data(*itertools.product(
        (None, 'eo', 'es'), # LANGUAGE_COOKIE
        (None, 'es', 'en'), # Language Preference In
    ))
    @ddt.unpack
    @mock.patch('openedx.core.djangoapps.lang_pref.middleware.set_user_preference')
    def test_preference_cookie_changes_setting(self, lang_cookie, lang_pref_in, mock_set_user_preference):
        # Cookie -> stored-preference sync: any non-None cookie should be
        # written back as the user's language preference.
        self.request.COOKIES[settings.LANGUAGE_COOKIE] = lang_cookie
        if lang_pref_in:
            set_user_preference(self.user, LANGUAGE_KEY, lang_pref_in)
        else:
            delete_user_preference(self.user, LANGUAGE_KEY)
        self.middleware.process_request(self.request)
        if lang_cookie is None:
            self.assertEqual(mock_set_user_preference.mock_calls, [])
        else:
            mock_set_user_preference.assert_called_with(self.user, LANGUAGE_KEY, lang_cookie)
    @ddt.data(*(
        (logged_in, ) + test_def
        for logged_in in (True, False)
        for test_def in [
            # (LANGUAGE_COOKIE, LANGUAGE_SESSION_KEY, Accept-Language In,
            #  Accept-Language Out, Session Lang Out)
            (None, None, None, None, None),
            (None, 'eo', None, None, 'eo'),
            (None, 'en', None, None, 'en'),
            (None, 'eo', 'en', 'en', 'eo'),
            (None, None, 'en', 'en', None),
            ('en', None, None, 'en', None),
            ('en', 'en', None, 'en', 'en'),
            ('en', None, 'eo', 'en;q=1.0,eo', None),
            ('en', None, 'en', 'en', None),
            ('en', 'eo', 'en', 'en', None),
            ('en', 'eo', 'eo', 'en;q=1.0,eo', None)
        ]
    ))
    @ddt.unpack
    def test_preference_cookie_overrides_browser(
            self, logged_in, lang_cookie, lang_session_in, accept_lang_in, accept_lang_out,
            lang_session_out,
    ):
        # The middleware may rewrite HTTP_ACCEPT_LANGUAGE so the language
        # cookie wins over the browser's preference; compare the parsed
        # header rather than the raw string.
        if not logged_in:
            self.request.user = self.anonymous_user
        if lang_cookie:
            self.request.COOKIES[settings.LANGUAGE_COOKIE] = lang_cookie
        if lang_session_in:
            self.request.session[LANGUAGE_SESSION_KEY] = lang_session_in
        if accept_lang_in:
            self.request.META['HTTP_ACCEPT_LANGUAGE'] = accept_lang_in
        else:
            del self.request.META['HTTP_ACCEPT_LANGUAGE']
        self.middleware.process_request(self.request)
        accept_lang_result = self.request.META.get('HTTP_ACCEPT_LANGUAGE')
        if accept_lang_result:
            accept_lang_result = parse_accept_lang_header(accept_lang_result)
        if accept_lang_out:
            accept_lang_out = parse_accept_lang_header(accept_lang_out)
        if accept_lang_out and accept_lang_result:
            self.assertItemsEqual(accept_lang_result, accept_lang_out)
        else:
            self.assertEqual(accept_lang_result, accept_lang_out)
        self.assertEquals(self.request.session.get(LANGUAGE_SESSION_KEY), lang_session_out)
    @ddt.data(None, 'es', 'en')
    def test_logout_preserves_cookie(self, lang_cookie):
        if lang_cookie:
            self.client.cookies[settings.LANGUAGE_COOKIE] = lang_cookie
        elif settings.LANGUAGE_COOKIE in self.client.cookies:
            del self.client.cookies[settings.LANGUAGE_COOKIE]
        # Use an actual call to the logout endpoint, because the logout function
        # explicitly clears all cookies
        self.client.get(reverse('logout'))
        if lang_cookie:
            self.assertEqual(
                self.client.cookies[settings.LANGUAGE_COOKIE].value,
                lang_cookie
            )
        else:
            self.assertNotIn(settings.LANGUAGE_COOKIE, self.client.cookies)
    @ddt.data(
        (None, None),
        ('es', 'es-419'),
        ('en', 'en'),
        ('es-419', 'es-419')
    )
    @ddt.unpack
    def test_login_captures_lang_pref(self, lang_cookie, expected_lang):
        # End-to-end: logging in should persist the language cookie as the
        # user's preference and set Content-Language accordingly.
        if lang_cookie:
            self.client.cookies[settings.LANGUAGE_COOKIE] = lang_cookie
        elif settings.LANGUAGE_COOKIE in self.client.cookies:
            del self.client.cookies[settings.LANGUAGE_COOKIE]
        # Use an actual call to the login endpoint, to validate that the middleware
        # stack does the right thing
        if settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):
            response = self.client.post(
                reverse('user_api_login_session'),
                data={
                    'email': self.user.email,
                    'password': UserFactory._DEFAULT_PASSWORD,
                    'remember': True,
                }
            )
        else:
            response = self.client.post(
                reverse('login_post'),
                data={
                    'email': self.user.email,
                    'password': UserFactory._DEFAULT_PASSWORD,
                    'honor_code': True,
                }
            )
        self.assertEqual(response.status_code, 200)
        if lang_cookie:
            self.assertEqual(response['Content-Language'], expected_lang)
            self.assertEqual(get_user_preference(self.user, LANGUAGE_KEY), lang_cookie)
            self.assertEqual(
                self.client.cookies[settings.LANGUAGE_COOKIE].value,
                lang_cookie
            )
        else:
            self.assertEqual(response['Content-Language'], 'en')
            self.assertEqual(get_user_preference(self.user, LANGUAGE_KEY), None)
            self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE].value, '')
    def test_process_response_no_user_noop(self):
        # Without request.user the response must pass through untouched.
        del self.request.user
        response = mock.Mock(spec=HttpResponse)
        result = self.middleware.process_response(self.request, response)
        self.assertIs(result, response)
        self.assertEqual(response.mock_calls, [])
    def test_preference_update_noop(self):
        # Query-count checks: the preference row is written only when the
        # cookie value actually differs from the stored preference.
        self.request.COOKIES[settings.LANGUAGE_COOKIE] = 'es'
        # No preference yet, should write to the database
        self.assertEqual(get_user_preference(self.user, LANGUAGE_KEY), None)
        self.middleware.process_request(self.request)
        self.assertEqual(get_user_preference(self.user, LANGUAGE_KEY), 'es')
        response = mock.Mock(spec=HttpResponse)
        with self.assertNumQueries(1):
            self.middleware.process_response(self.request, response)
        # Preference is the same as the cookie, shouldn't write to the database
        with self.assertNumQueries(3):
            self.middleware.process_request(self.request)
        self.assertEqual(get_user_preference(self.user, LANGUAGE_KEY), 'es')
        response = mock.Mock(spec=HttpResponse)
        with self.assertNumQueries(1):
            self.middleware.process_response(self.request, response)
        # Cookie changed, should write to the database again
        self.request.COOKIES[settings.LANGUAGE_COOKIE] = 'en'
        self.middleware.process_request(self.request)
        self.assertEqual(get_user_preference(self.user, LANGUAGE_KEY), 'en')
        with self.assertNumQueries(1):
            self.middleware.process_response(self.request, response)
| agpl-3.0 |
takis/django | tests/auth_tests/test_context_processors.py | 269 | 6773 | import datetime
from django.contrib.auth import authenticate
from django.contrib.auth.context_processors import PermLookupDict, PermWrapper
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.test import SimpleTestCase, TestCase, override_settings
from .settings import AUTH_MIDDLEWARE_CLASSES, AUTH_TEMPLATES
class MockUser(object):
    """Minimal user stub: grants module perms for 'mockapp' and exactly
    one permission, 'mockapp.someperm'."""
    def has_module_perms(self, perm):
        """Return True only for the 'mockapp' module."""
        return perm == 'mockapp'
    def has_perm(self, perm):
        """Return True only for 'mockapp.someperm'."""
        return perm == 'mockapp.someperm'
class PermWrapperTests(SimpleTestCase):
    """
    Test some details of the PermWrapper implementation.
    """
    class EQLimiterObject(object):
        """
        This object makes sure __eq__ will not be called endlessly.
        """
        def __init__(self):
            # Counts how many times __eq__ has run; used to break out of a
            # would-be infinite comparison loop instead of hanging the test.
            self.eq_calls = 0
        def __eq__(self, other):
            if self.eq_calls > 0:
                return True
            self.eq_calls += 1
            return False
    def test_permwrapper_in(self):
        """
        Test that 'something' in PermWrapper works as expected.
        """
        perms = PermWrapper(MockUser())
        # Works for modules and full permissions.
        self.assertIn('mockapp', perms)
        self.assertNotIn('nonexisting', perms)
        self.assertIn('mockapp.someperm', perms)
        self.assertNotIn('mockapp.nonexisting', perms)
    def test_permlookupdict_in(self):
        """
        No endless loops if accessed with 'in' - refs #18979.
        """
        # `in` must raise TypeError rather than repeatedly calling __eq__.
        pldict = PermLookupDict(MockUser(), 'mockapp')
        with self.assertRaises(TypeError):
            self.EQLimiterObject() in pldict
@override_settings(
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls',
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False, # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
    """
    Tests for the ``django.contrib.auth.context_processors.auth`` processor
    """
    @classmethod
    def setUpTestData(cls):
        # password = "secret"
        cls.u1 = User.objects.create(
            id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
            first_name='Super', last_name='User', email='super@example.com',
            is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )
    @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
    def test_session_not_accessed(self):
        """
        Tests that the session is not accessed simply by including
        the auth context processor
        """
        response = self.client.get('/auth_processor_no_attr_access/')
        self.assertContains(response, "Session not accessed")
    @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
    def test_session_is_accessed(self):
        """
        Tests that the session is accessed if the auth context processor
        is used and relevant attributes accessed.
        """
        response = self.client.get('/auth_processor_attr_access/')
        self.assertContains(response, "Session accessed")
    def test_perms_attrs(self):
        # Grant one permission and check the template sees both the module
        # level and the specific permission via {{ perms }}.
        u = User.objects.create_user(username='normal', password='secret')
        u.user_permissions.add(
            Permission.objects.get(
                content_type=ContentType.objects.get_for_model(Permission),
                codename='add_permission'))
        self.client.login(username='normal', password='secret')
        response = self.client.get('/auth_processor_perms/')
        self.assertContains(response, "Has auth permissions")
        self.assertContains(response, "Has auth.add_permission permissions")
        self.assertNotContains(response, "nonexisting")
    def test_perm_in_perms_attrs(self):
        # Same as above, but exercised through the `perm in perms` template
        # syntax (different view/template).
        u = User.objects.create_user(username='normal', password='secret')
        u.user_permissions.add(
            Permission.objects.get(
                content_type=ContentType.objects.get_for_model(Permission),
                codename='add_permission'))
        self.client.login(username='normal', password='secret')
        response = self.client.get('/auth_processor_perm_in_perms/')
        self.assertContains(response, "Has auth permissions")
        self.assertContains(response, "Has auth.add_permission permissions")
        self.assertNotContains(response, "nonexisting")
    def test_message_attrs(self):
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_messages/')
        self.assertContains(response, "Message 1")
    def test_user_attrs(self):
        """
        Test that the lazy objects returned behave just like the wrapped objects.
        """
        # These are 'functional' level tests for common use cases.  Direct
        # testing of the implementation (SimpleLazyObject) is in the 'utils'
        # tests.
        self.client.login(username='super', password='secret')
        user = authenticate(username='super', password='secret')
        response = self.client.get('/auth_processor_user/')
        self.assertContains(response, "unicode: super")
        self.assertContains(response, "id: 100")
        self.assertContains(response, "username: super")
        # bug #12037 is tested by the {% url %} in the template:
        self.assertContains(response, "url: /userpage/super/")
        # See if this object can be used for queries where a Q() comparing
        # a user can be used with another Q() (in an AND or OR fashion).
        # This simulates what a template tag might do with the user from the
        # context. Note that we don't need to execute a query, just build it.
        #
        # The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
        # User is a fatal TypeError: "function() takes at least 2 arguments
        # (0 given)" deep inside deepcopy().
        #
        # Python 2.5 and 2.6 succeeded, but logged internally caught exception
        # spew:
        #
        #    Exception RuntimeError: 'maximum recursion depth exceeded while
        #    calling a Python object' in <type 'exceptions.AttributeError'>
        #    ignored"
        Q(user=response.context['user']) & Q(someflag=True)
        # Tests for user equality.  This is hard because User defines
        # equality in a non-duck-typing way
        # See bug #12060
        self.assertEqual(response.context['user'], user)
        self.assertEqual(user, response.context['user'])
| bsd-3-clause |
liavkoren/djangoDev | tests/logging_tests/tests.py | 19 | 13277 | from __future__ import unicode_literals
import logging
import warnings
from django.core import mail
from django.test import TestCase, RequestFactory, override_settings
from django.test.utils import patch_logger
from django.utils.encoding import force_text
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.log import (CallbackFilter, RequireDebugFalse,
RequireDebugTrue)
from django.utils.six import StringIO
from admin_scripts.tests import AdminScriptTestCase
from .logconfig import MyEmailBackend
# logging config prior to using filter with mail_admins
# (logging.config.dictConfig schema, version 1: a single AdminEmailHandler
# attached to the 'django.request' logger at ERROR level, no filters)
OLD_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
class LoggingFiltersTest(TestCase):
    """Unit tests for the DEBUG-sensitive logging filters."""
    def test_require_debug_false_filter(self):
        """
        Test the RequireDebugFalse filter class.
        """
        filter_ = RequireDebugFalse()
        # The filter ignores the record argument entirely; only settings.DEBUG
        # determines the outcome.
        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), False)
        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), True)
    def test_require_debug_true_filter(self):
        """
        Test the RequireDebugTrue filter class.
        """
        filter_ = RequireDebugTrue()
        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), True)
        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), False)
class DefaultLoggingTest(TestCase):
    """Checks the default 'django' logger only emits output when DEBUG=True."""
    def setUp(self):
        self.logger = logging.getLogger('django')
        # Remember the handler's real stream so tearDown can restore it after
        # the test redirects it to a StringIO.
        self.old_stream = self.logger.handlers[0].stream
    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream
    def test_django_logger(self):
        """
        The 'django' base logger only output anything when DEBUG=True.
        """
        output = StringIO()
        self.logger.handlers[0].stream = output
        self.logger.error("Hey, this is an error.")
        # DEBUG defaults to False in tests, so nothing should be written.
        self.assertEqual(output.getvalue(), '')
        with self.settings(DEBUG=True):
            self.logger.error("Hey, this is an error.")
            self.assertEqual(output.getvalue(), 'Hey, this is an error.\n')
class WarningLoggerTests(TestCase):
    """
    Tests that warnings output for RemovedInDjangoXXWarning (XX being the next
    Django version) is enabled and captured to the logging system
    """
    def setUp(self):
        # If tests are invoke with "-Wall" (or any -W flag actually) then
        # warning logging gets disabled (see configure_logging in django/utils/log.py).
        # However, these tests expect warnings to be logged, so manually force warnings
        # to the logs. Use getattr() here because the logging capture state is
        # undocumented and (I assume) brittle.
        self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
        logging.captureWarnings(True)
        # this convoluted setup is to avoid printing this deprecation to
        # stderr during test running - as the test runner forces deprecations
        # to be displayed at the global py.warnings level
        self.logger = logging.getLogger('py.warnings')
        self.outputs = []
        self.old_streams = []
        for handler in self.logger.handlers:
            self.old_streams.append(handler.stream)
            self.outputs.append(StringIO())
            handler.stream = self.outputs[-1]
    def tearDown(self):
        # Restore each handler's original stream and the warnings capture flag.
        for i, handler in enumerate(self.logger.handlers):
            self.logger.handlers[i].stream = self.old_streams[i]
        # Reset warnings state.
        logging.captureWarnings(self._old_capture_state)
    @override_settings(DEBUG=True)
    def test_warnings_capture(self):
        warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
        output = force_text(self.outputs[0].getvalue())
        self.assertTrue('Foo Deprecated' in output)
    def test_warnings_capture_debug_false(self):
        # With DEBUG at its default (False) the deprecation must NOT be logged.
        warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
        output = force_text(self.outputs[0].getvalue())
        self.assertFalse('Foo Deprecated' in output)
    @override_settings(DEBUG=True)
    def test_error_filter_still_raises(self):
        # Turning the warning into an error via a filter must still raise,
        # even while warnings are being captured into logging.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'error',
                category=RemovedInNextVersionWarning
            )
            with self.assertRaises(RemovedInNextVersionWarning):
                warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
class CallbackFilterTest(TestCase):
    """Unit tests for django.utils.log.CallbackFilter."""
    def test_sense(self):
        # filter() must return exactly what the wrapped callback returns.
        f_false = CallbackFilter(lambda r: False)
        f_true = CallbackFilter(lambda r: True)
        self.assertEqual(f_false.filter("record"), False)
        self.assertEqual(f_true.filter("record"), True)
    def test_passes_on_record(self):
        # The record handed to filter() is forwarded unchanged to the callback.
        collector = []
        def _callback(record):
            collector.append(record)
            return True
        f = CallbackFilter(_callback)
        f.filter("a record")
        self.assertEqual(collector, ["a record"])
class AdminEmailHandlerTest(TestCase):
    """Tests for django.utils.log.AdminEmailHandler (error-mail reports)."""
    logger = logging.getLogger('django.request')
    def get_admin_email_handler(self, logger):
        # Inspired from views/views.py: send_log()
        # ensuring the AdminEmailHandler does not get filtered out
        # even with DEBUG=True.
        admin_email_handler = [
            h for h in logger.handlers
            if h.__class__.__name__ == "AdminEmailHandler"
        ][0]
        return admin_email_handler
    def test_fail_silently(self):
        admin_email_handler = self.get_admin_email_handler(self.logger)
        self.assertTrue(admin_email_handler.connection().fail_silently)
    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
    )
    def test_accepts_args(self):
        """
        Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
        setting are used to compose the email subject.
        Refs #16736.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            # Drop the filters so the handler fires regardless of DEBUG.
            admin_email_handler.filters = []
            self.logger.error(message, token1, token2)
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters
    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
        INTERNAL_IPS=('127.0.0.1',),
    )
    def test_accepts_args_and_request(self):
        """
        Ensure that the subject is also handled if being
        passed a request object.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            rf = RequestFactory()
            request = rf.get('/')
            # Passing a request in `extra` switches the subject to the
            # "(internal IP)" form because of INTERNAL_IPS above.
            self.logger.error(message, token1, token2,
                extra={
                    'status_code': 403,
                    'request': request,
                }
            )
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_subject_accepts_newlines(self):
        """
        Ensure that newlines in email reports' subjects are escaped to avoid
        AdminErrorHandler to fail.
        Refs #17281.
        """
        message = 'Message \r\n with newlines'
        expected_subject = 'ERROR: Message \\r\\n with newlines'
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertFalse('\n' in mail.outbox[0].subject)
        self.assertFalse('\r' in mail.outbox[0].subject)
        self.assertEqual(mail.outbox[0].subject, expected_subject)
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_truncate_subject(self):
        """
        RFC 2822's hard limit is 998 characters per line.
        So, minus "Subject: ", the actual subject must be no longer than 989
        characters.
        Refs #17281.
        """
        message = 'a' * 1000
        expected_subject = 'ERROR: aa' + 'a' * 980
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, expected_subject)
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        DEBUG=False,
    )
    def test_uses_custom_email_backend(self):
        """
        Refs #19325
        """
        message = 'All work and no play makes Jack a dull boy'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        mail_admins_called = {'called': False}
        def my_mail_admins(*args, **kwargs):
            connection = kwargs['connection']
            self.assertIsInstance(connection, MyEmailBackend)
            mail_admins_called['called'] = True
        # Monkeypatches
        orig_mail_admins = mail.mail_admins
        orig_email_backend = admin_email_handler.email_backend
        mail.mail_admins = my_mail_admins
        admin_email_handler.email_backend = (
            'logging_tests.logconfig.MyEmailBackend')
        try:
            self.logger.error(message)
            self.assertTrue(mail_admins_called['called'])
        finally:
            # Revert Monkeypatches
            mail.mail_admins = orig_mail_admins
            admin_email_handler.email_backend = orig_email_backend
class SettingsConfigTest(AdminScriptTestCase):
    """
    Test that accessing settings in a custom logging handler does not trigger
    a circular import error.
    """
    def setUp(self):
        # Write a throwaway settings.py whose LOGGING config points at a
        # handler that itself imports settings (the circular-import case).
        log_config = """{
    'version': 1,
    'handlers': {
        'custom_handler': {
            'level': 'INFO',
            'class': 'logging_tests.logconfig.MyHandler',
        }
    }
}"""
        self.write_settings('settings.py', sdict={'LOGGING': log_config})
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_circular_dependency(self):
        # validate is just an example command to trigger settings configuration
        out, err = self.run_manage(['validate'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
    """Stub LOGGING_CONFIG callable: records that it was invoked."""
    dictConfig.called = True

# Start in the "not yet called" state so tests can detect the invocation.
dictConfig.called = False
class SetupConfigureLogging(TestCase):
    """
    Test that calling django.setup() initializes the logging configuration.
    """
    @override_settings(LOGGING_CONFIG='logging_tests.tests.dictConfig')
    def test_configure_initializes_logging(self):
        # Point LOGGING_CONFIG at the stub dictConfig above; setup() must
        # invoke it, flipping its `called` flag.
        from django import setup
        setup()
        self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(TestCase):
    """Tests that SuspiciousOperation events reach the django.security loggers."""
    def test_suspicious_operation_creates_log_message(self):
        with patch_logger('django.security.SuspiciousOperation', 'error') as calls:
            self.client.get('/suspicious/')
            self.assertEqual(len(calls), 1)
            self.assertEqual(calls[0], 'dubious')
    def test_suspicious_operation_uses_sublogger(self):
        # Subclasses log to their own child logger (here DisallowedHost).
        with patch_logger('django.security.DisallowedHost', 'error') as calls:
            self.client.get('/suspicious_spec/')
            self.assertEqual(len(calls), 1)
            self.assertEqual(calls[0], 'dubious')
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        DEBUG=False,
    )
    def test_suspicious_email_admins(self):
        # With DEBUG=False a suspicious request also emails the admins.
        self.client.get('/suspicious/')
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('path:/suspicious/,', mail.outbox[0].body)
| bsd-3-clause |
api-ai/api-ai-python | setup.py | 3 | 2304 | # -*- coding:utf8 -*-
# !/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup script for using and create package for pip.
"""
import sys
import os
# import apiai
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup
# Shortcut: `python setup.py publish` builds sdist+wheel and uploads them.
if sys.argv[-1] == 'publish':
    # os.system('python setup.py sdist upload')
    os.system('python setup.py sdist bdist_wheel upload')
    sys.exit()
PACKAGES = [
    'apiai',
    'apiai.requests',
    'apiai.requests.query',
    'apiai.requests.user_entities'
]
# NOTE(review): numpy as a hard install requirement is duplicated in
# EXTRAS_REQUIRE below (which is itself commented out in setup()) —
# confirm whether numpy should be required or optional.
REQUIRES = [
    'numpy'
]
EXTRAS_REQUIRE = {
    'numpy': ['numpy']
}
# README + HISTORY become the PyPI long description.
with open('README.rst', 'r') as f:
    README = f.read()
with open('HISTORY.rst', 'r') as f:
    HISTORY = f.read()
setup(
    name='apiai',
    version='1.2.3',
    # NOTE(review): this description talks about the "iOS SDK" although this
    # is the Python package — looks copied from the iOS client; confirm.
    description=(
        'The API.AI iOS SDK makes it easy to integrate speech '
        'recognition with API.AI natural language processing '
        'API on iOS devices.'
    ),
    long_description=README + '\n\n' + HISTORY,
    author='Dmitriy Kuragin',
    author_email='kuragin@speaktoit.com',
    license='Apache 2.0',
    url='https://api.ai/',
    packages=PACKAGES,
    install_requires=REQUIRES,
    # extras_require=EXTRAS_REQUIRE,
    package_data={'': ['LICENSE']},
    # All classifiers are commented out, so setup() receives an empty tuple.
    classifiers=(
        # 'Development Status :: 5 - Production/Stable',
        # 'Intended Audience :: Developers',
        # 'Natural Language :: English',
        # 'License :: OSI Approved :: Apache Software License',
        # 'Programming Language :: Python',
        # 'Programming Language :: Python :: 2.7',
        # 'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.3',
        # 'Programming Language :: Python :: 3.4'
    ),
)
| apache-2.0 |
praekelt/nurseconnect | nurseconnect/tests/test_utils.py | 1 | 4506 | from freezegun import freeze_time
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import SiteLanguageRelation, Languages, Main
from molo.surveys.models import MoloSurveyPage, MoloSurveySubmission
from molo.surveys.tests.test_models import create_survey
from nurseconnect.utils import (
get_period_date_format,
convert_string_to_boolean_list,
get_survey_results_for_user,
)
class UtilsTestCase(TestCase):
    """Unit tests for get_period_date_format and convert_string_to_boolean_list."""
    @freeze_time("2018-02-01")
    def test_get_period_date_format_1(self):
        # Frozen clock: the helper should render the current month as YYYYMM.
        self.assertEqual(
            get_period_date_format(),
            "201802"
        )
    @freeze_time("2012-12-01")
    def test_get_period_date_format_2(self):
        self.assertEqual(
            get_period_date_format(),
            "201212"
        )
    def test_convert_string_to_boolean_list_1(self):
        self.assertEqual(
            convert_string_to_boolean_list("true"),
            [True]
        )
    def test_convert_string_to_boolean_list_2(self):
        self.assertEqual(
            convert_string_to_boolean_list("true,false"),
            [True, False]
        )
    def test_convert_string_to_boolean_list_3(self):
        # Surrounding whitespace is tolerated.
        self.assertEqual(
            convert_string_to_boolean_list(" true, false"),
            [True, False]
        )
    def test_convert_string_to_boolean_list_4(self):
        # Matching is case-insensitive.
        self.assertEqual(
            convert_string_to_boolean_list("TRUE,FalSE"),
            [True, False]
        )
    def test_convert_string_to_boolean_list_5(self):
        # Tokens that are neither true nor false are silently dropped.
        self.assertEqual(
            convert_string_to_boolean_list("true,BANANA,false"),
            [True, False]
        )
    def test_convert_string_to_boolean_list_6(self):
        self.assertEqual(
            convert_string_to_boolean_list("false , True"),
            [False, True]
        )
    def test_convert_string_to_boolean_list_7(self):
        # Only commas are accepted as separators; anything else yields [].
        self.assertEqual(
            convert_string_to_boolean_list("false;true"),
            []
        )
class SurveyUtilsTestCase(TestCase, MoloTestCaseMixin):
    """Tests for get_survey_results_for_user against stored survey submissions."""
    def setUp(self):
        self.mk_main()
        self.user = User.objects.create_user(
            username='tester',
            email='tester@example.com',
            password='tester')
    def test_get_survey_results_for_user_1(self):
        create_survey([
            {
                "question": "The sky is blue",
                "type": 'radio',
                "choices": ["true", "false"],
                "required": True,
                "page_break": False,
            }
        ])
        survey = MoloSurveyPage.objects.first()
        # thank_you_text carries the comma-separated correct answers that
        # get_survey_results_for_user compares submissions against.
        survey.thank_you_text = "true"
        survey.save()
        MoloSurveySubmission.objects.create(
            page=survey, user=self.user,
            form_data='{"the-sky-is-blue": "True"}')
        self.assertEqual(
            get_survey_results_for_user(survey, self.user),
            [{
                "question": "The sky is blue",
                "user_answer": True,
                "correct_answer": True,
            }]
        )
    def test_get_survey_results_for_user_2(self):
        # Two questions: second user answer (True) contradicts the stored
        # correct answer (false), so correct_answer comes back False.
        create_survey([
            {
                "question": "The sky is blue",
                "type": 'radio',
                "choices": ["true", "false"],
                "required": True,
                "page_break": False,
            },
            {
                "question": "The grass is purple",
                "type": 'radio',
                "choices": ["true", "false"],
                "required": True,
                "page_break": False,
            }
        ])
        survey = MoloSurveyPage.objects.first()
        survey.thank_you_text = "true,false"
        survey.save()
        MoloSurveySubmission.objects.create(
            page=survey, user=self.user,
            form_data=('{"the-sky-is-blue": "True", '
                       '"the-grass-is-purple": "True"}'))
        self.assertEqual(
            get_survey_results_for_user(survey, self.user),
            [
                {
                    "question": "The sky is blue",
                    "user_answer": True,
                    "correct_answer": True,
                },
                {
                    "question": "The grass is purple",
                    "user_answer": True,
                    "correct_answer": False,
                },
            ]
        )
| bsd-2-clause |
oscaro/django | django/db/models/query.py | 48 | 77464 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
from collections import deque
import copy
import sys
from django.conf import settings
from django.core import exceptions
from django.db import connections, router, transaction, IntegrityError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import AutoField, Empty
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models.sql.constants import CURSOR
from django.db.models import sql
from django.utils.functional import partition
from django.utils import six
from django.utils import timezone
# The maximum number (one less than the max to be precise) of results to fetch
# in a get() query
MAX_GET_RESULTS = 20
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
def _pickle_queryset(class_bases, class_dict):
    """
    Used by `__reduce__` to create the initial version of the `QuerySet` class
    onto which the output of `__getstate__` will be applied.

    See `__reduce__` for more details.
    """
    # Rebuild the dynamically-created specialized queryset class, then swap
    # it onto an empty shell instance; __getstate__'s dict is applied later.
    dynamic_class = type(class_bases[0].__name__, class_bases, class_dict)
    shell = Empty()
    shell.__class__ = dynamic_class
    return shell
class QuerySet(object):
    """
    Represents a lazy database lookup for a set of objects.

    The query is not executed until the QuerySet is evaluated (iterated,
    sliced with a step, pickled, len()-ed, etc.); results are then memoized
    in ``_result_cache``.
    """
    def __init__(self, model=None, query=None, using=None, hints=None):
        self.model = model
        self._db = using  # explicit DB alias; None means "ask the router"
        self._hints = hints or {}  # routing hints forwarded to db_for_read/write
        self.query = query or sql.Query(self.model)
        self._result_cache = None  # None until the queryset has been evaluated
        self._sticky_filter = False
        self._for_write = False  # True once a write (save/delete/update) is requested
        self._prefetch_related_lookups = []
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field, {pk: rel_obj}}
    def as_manager(cls):
        # Address the circular dependency between `Queryset` and `Manager`.
        from django.db.models.manager import Manager
        return Manager.from_queryset(cls)()
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)
    ########################
    # PYTHON MAGIC METHODS #
    ########################
    def __deepcopy__(self, memo):
        """
        Deep copy of a QuerySet doesn't populate the cache
        """
        obj = self.__class__()
        for k, v in self.__dict__.items():
            if k == '_result_cache':
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = copy.deepcopy(v, memo)
        return obj
    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        self._fetch_all()
        obj_dict = self.__dict__.copy()
        return obj_dict
    def __reduce__(self):
        """
        Used by pickle to deal with the types that we create dynamically when
        specialized queryset such as `ValuesQuerySet` are used in conjunction
        with querysets that are *subclasses* of `QuerySet`.

        See `_clone` implementation for more details.
        """
        if hasattr(self, '_specialized_queryset_class'):
            class_bases = (
                self._specialized_queryset_class,
                self._base_queryset_class,
            )
            class_dict = {
                '_specialized_queryset_class': self._specialized_queryset_class,
                '_base_queryset_class': self._base_queryset_class,
            }
            return _pickle_queryset, (class_bases, class_dict), self.__getstate__()
        return super(QuerySet, self).__reduce__()
    def __repr__(self):
        # Fetch one row beyond the display limit so we can tell whether to
        # append the truncation marker.
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return repr(data)
    def __len__(self):
        """Evaluate the queryset and return the number of cached results."""
        self._fetch_all()
        return len(self._result_cache)
    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler:execute_sql()
               - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql/compiler.results_iter()
               - Returns one row at time. At this point the rows are still just
                 tuples. In some cases the return values are converted to
                 Python values at this location (see resolve_columns(),
                 resolve_aggregate()).
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()
        return iter(self._result_cache)
    def __nonzero__(self):
        """Truth value: True when the (fully evaluated) result set is non-empty."""
        self._fetch_all()
        return bool(self._result_cache)
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice,) + six.integer_types):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        # Serve from the cache when the queryset was already evaluated.
        if self._result_cache is not None:
            return self._result_cache[k]
        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            # A step forces evaluation; otherwise return the lazy sliced clone.
            return list(qs)[::k.step] if k.step else qs
        qs = self._clone()
        qs.query.set_limits(k, k + 1)
        return list(qs)[0]
    def __and__(self, other):
        """Intersection: combine both queries with SQL AND."""
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other
        if isinstance(self, EmptyQuerySet):
            return self
        combined = self._clone()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.AND)
        return combined
    def __or__(self, other):
        """Union: combine both queries with SQL OR."""
        self._merge_sanity_check(other)
        if isinstance(self, EmptyQuerySet):
            return other
        if isinstance(other, EmptyQuerySet):
            return self
        combined = self._clone()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.OR)
        return combined
    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        fill_cache = False
        if connections[self.db].features.supports_select_related:
            fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth
        extra_select = list(self.query.extra_select)
        aggregate_select = list(self.query.aggregate_select)
        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.concrete_fields
        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_concrete_fields_with_model():
                if model is None:
                    model = self.model
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)
        # Row layout: [extra columns | model fields | aggregate columns].
        index_start = len(extra_select)
        aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
        skip = None
        if load_fields and not fill_cache:
            # Some fields have been deferred, so we have to initialize
            # via keyword arguments.
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)
        # Cache db and model outside the loop
        db = self.db
        model = self.model
        compiler = self.query.get_compiler(using=db)
        if fill_cache:
            klass_info = get_klass_info(model, max_depth=max_depth,
                                        requested=requested, only_load=only_load)
        for row in compiler.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(row, index_start, db, klass_info,
                                        offset=len(aggregate_select))
            else:
                # Omit aggregates in object creation.
                row_data = row[index_start:aggregate_start]
                if skip:
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    obj = model(*row_data)
                # Store the source database of the object
                obj._state.db = db
                # This object came from the database; it's not being added.
                obj._state.adding = False
            if extra_select:
                for i, k in enumerate(extra_select):
                    setattr(obj, k, row[i])
            # Add the aggregates to the model
            if aggregate_select:
                for i, aggregate in enumerate(aggregate_select):
                    setattr(obj, aggregate, row[i + aggregate_start])
            # Add the known related objects to the model, if there are any
            if self._known_related_objects:
                for field, rel_objs in self._known_related_objects.items():
                    # Avoid overwriting objects loaded e.g. by select_related
                    if hasattr(obj, field.get_cache_name()):
                        continue
                    pk = getattr(obj, field.get_attname())
                    try:
                        rel_obj = rel_objs[pk]
                    except KeyError:
                        pass  # may happen in qs1 | qs2 scenarios
                    else:
                        setattr(obj, field.name, rel_obj)
            yield obj
    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        for arg in args:
            kwargs[arg.default_alias] = arg
        query = self.query.clone()
        # A slice on the queryset forces the aggregation into a subquery.
        force_subq = query.low_mark != 0 or query.high_mark is not None
        for (alias, aggregate_expr) in kwargs.items():
            query.add_aggregate(aggregate_expr, self.model, alias,
                                is_summary=True)
        return query.get_aggregation(using=self.db, force_subq=force_subq)
    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.

        If the QuerySet is already fully cached this simply returns the length
        of the cached results set to avoid multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None:
            return len(self._result_cache)
        return self.query.get_count(using=self.db)
    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.
        """
        clone = self.filter(*args, **kwargs)
        if self.query.can_filter():
            clone = clone.order_by()
        # Limit the query to MAX_GET_RESULTS + 1 rows so a huge mismatch
        # doesn't fetch the whole table just to report "multiple returned".
        if (not clone.query.select_for_update or
                connections[self.db].features.supports_select_for_update_with_limit):
            clone = clone[:MAX_GET_RESULTS + 1]
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." %
                self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned(
            "get() returned more than one %s -- it returned %s!" % (
                self.model._meta.object_name,
                num if num <= MAX_GET_RESULTS else 'more than %s' % MAX_GET_RESULTS
            )
        )
    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        self._for_write = True
        obj.save(force_insert=True, using=self.db)
        return obj
    def bulk_create(self, objs, batch_size=None):
        """
        Inserts each of the instances into the database. This does *not* call
        save() on each of the instances, does not send any pre/post save
        signals, and does not set the primary key attribute if it is an
        autoincrement field.
        """
        # So this case is fun. When you bulk insert you don't get the primary
        # keys back (if it's an autoincrement), so you can't insert into the
        # child tables which references this. There are two workarounds, 1)
        # this could be implemented if you didn't have an autoincrement pk,
        # and 2) you could do it by doing O(n) normal inserts into the parent
        # tables to get the primary keys back, and then doing a single bulk
        # insert into the childmost table. Some databases might allow doing
        # this by using RETURNING clause for the insert query. We're punting
        # on these for now because they are relatively rare cases.
        assert batch_size is None or batch_size > 0
        if self.model._meta.parents:
            raise ValueError("Can't bulk create an inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        fields = self.model._meta.local_concrete_fields
        with transaction.commit_on_success_unless_managed(using=self.db):
            if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
                    and self.model._meta.has_auto_field):
                self._batched_insert(objs, fields, batch_size)
            else:
                # Objects with a pre-set pk can keep their AutoField column;
                # the rest must be inserted without it.
                objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
                if objs_with_pk:
                    self._batched_insert(objs_with_pk, fields, batch_size)
                if objs_without_pk:
                    fields = [f for f in fields if not isinstance(f, AutoField)]
                    self._batched_insert(objs_without_pk, fields, batch_size)
        return objs
    def get_or_create(self, defaults=None, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        lookup, params = self._extract_model_params(defaults, **kwargs)
        self._for_write = True
        try:
            return self.get(**lookup), False
        except self.model.DoesNotExist:
            return self._create_object_from_params(lookup, params)
    def update_or_create(self, defaults=None, **kwargs):
        """
        Looks up an object with the given kwargs, updating one with defaults
        if it exists, otherwise creates a new one.
        Returns a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        defaults = defaults or {}
        lookup, params = self._extract_model_params(defaults, **kwargs)
        self._for_write = True
        try:
            obj = self.get(**lookup)
        except self.model.DoesNotExist:
            obj, created = self._create_object_from_params(lookup, params)
            if created:
                return obj, created
        for k, v in six.iteritems(defaults):
            setattr(obj, k, v)
        with transaction.atomic(using=self.db):
            obj.save(using=self.db)
        return obj, False
    def _create_object_from_params(self, lookup, params):
        """
        Tries to create an object using passed params.
        Used by get_or_create and update_or_create
        """
        obj = self.model(**params)
        try:
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except IntegrityError:
            # A concurrent writer may have created the row between our get()
            # and save(); retry the lookup before re-raising.
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                pass
            six.reraise(*exc_info)
    def _extract_model_params(self, defaults, **kwargs):
        """
        Prepares `lookup` (kwargs that are valid model attributes), `params`
        (for creating a model instance) based on given kwargs; for use by
        get_or_create and update_or_create.
        """
        defaults = defaults or {}
        lookup = kwargs.copy()
        for f in self.model._meta.fields:
            # Normalize attnames (e.g. author_id) to field names (author).
            if f.attname in lookup:
                lookup[f.name] = lookup.pop(f.attname)
        # Lookups with LOOKUP_SEP (e.g. name__iexact) can't be constructor args.
        params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
        params.update(defaults)
        return lookup, params
    def _earliest_or_latest(self, field_name=None, direction="-"):
        """
        Returns the latest object, according to the model's
        'get_latest_by' option or optional given field_name.
        """
        order_by = field_name or getattr(self.model._meta, 'get_latest_by')
        assert bool(order_by), "earliest() and latest() require either a "\
            "field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken."
        obj = self._clone()
        obj.query.set_limits(high=1)
        obj.query.clear_ordering(force_empty=True)
        obj.query.add_ordering('%s%s' % (direction, order_by))
        return obj.get()
    def earliest(self, field_name=None):
        return self._earliest_or_latest(field_name=field_name, direction="")
    def latest(self, field_name=None):
        return self._earliest_or_latest(field_name=field_name, direction="-")
    def first(self):
        """
        Returns the first object of a query, returns None if no match is found.
        """
        qs = self if self.ordered else self.order_by('pk')
        try:
            return qs[0]
        except IndexError:
            return None
    def last(self):
        """
        Returns the last object of a query, returns None if no match is found.
        """
        qs = self.reverse() if self.ordered else self.order_by('-pk')
        try:
            return qs[0]
        except IndexError:
            return None
    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
        if not id_list:
            return {}
        qs = self.filter(pk__in=id_list).order_by()
        return dict((obj._get_pk_val(), obj) for obj in qs)
    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with delete."
        del_query = self._clone()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)
        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True
    delete.queryset_only = True
    def _raw_delete(self, using):
        """
        Deletes objects found from the given queryset in single direct SQL
        query. No signals are sent, and there is no protection for cascades.
        """
        sql.DeleteQuery(self.model).delete_qs(self, using)
    _raw_delete.alters_data = True
    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        with transaction.commit_on_success_unless_managed(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows
    update.alters_data = True
    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False
    def exists(self):
        """Return True if the query matches any rows, evaluating lazily."""
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)
    def _prefetch_related_objects(self):
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
        self._prefetch_done = True
    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################
    def raw(self, raw_query, params=None, translations=None, using=None):
        if using is None:
            using = self.db
        return RawQuerySet(raw_query, model=self.model,
                           params=params, translations=translations,
                           using=using)
    def values(self, *fields):
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
    def values_list(self, *fields, **kwargs):
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                            % (list(kwargs),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                           _fields=fields)
    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of date objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
                           _field_name=field_name, _kind=kind, _order=order)
    def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
        """
        Returns a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind'.
        """
        assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        if settings.USE_TZ:
            if tzinfo is None:
                tzinfo = timezone.get_current_timezone()
        else:
            # Timezone support disabled: truncation happens in naive local time.
            tzinfo = None
        return self._clone(klass=DateTimeQuerySet, setup=True,
                           _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)
    def none(self):
        """
        Returns an empty QuerySet.
        """
        clone = self._clone()
        clone.query.set_empty()
        return clone
    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################
    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()
    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)
    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)
    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args or kwargs:
            assert self.query.can_filter(), \
                "Cannot filter a query once a slice has been taken."
        clone = self._clone()
        if negate:
            clone.query.add_q(~Q(*args, **kwargs))
        else:
            clone.query.add_q(Q(*args, **kwargs))
        return clone
    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.

        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(None, **filter_obj)
    def select_for_update(self, nowait=False):
        """
        Returns a new QuerySet instance that will select objects with a
        FOR UPDATE lock.
        """
        obj = self._clone()
        obj._for_write = True
        obj.query.select_for_update = True
        obj.query.select_for_update_nowait = nowait
        return obj
    def select_related(self, *fields):
        """
        Returns a new QuerySet instance that will select related objects.

        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.

        If select_related(None) is called, the list is cleared.
        """
        obj = self._clone()
        if fields == (None,):
            obj.query.select_related = False
        elif fields:
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        return obj
    def prefetch_related(self, *lookups):
        """
        Returns a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.

        When prefetch_related() is called more than once, the list of lookups to
        prefetch is appended to. If prefetch_related(None) is called, the list
        is cleared.
        """
        clone = self._clone()
        if lookups == (None,):
            clone._prefetch_related_lookups = []
        else:
            clone._prefetch_related_lookups.extend(lookups)
        return clone
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with data aggregated from related fields.
        """
        for arg in args:
            if arg.default_alias in kwargs:
                raise ValueError("The named annotation '%s' conflicts with the "
                                 "default name for another annotation."
                                 % arg.default_alias)
            kwargs[arg.default_alias] = arg
        names = getattr(self, '_fields', None)
        if names is None:
            names = set(self.model._meta.get_all_field_names())
        for aggregate in kwargs:
            if aggregate in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                                 "the model." % aggregate)
        obj = self._clone()
        obj._setup_aggregate_query(list(kwargs))
        # Add the aggregates to the query
        for (alias, aggregate_expr) in kwargs.items():
            obj.query.add_aggregate(aggregate_expr, self.model, alias,
                                    is_summary=False)
        return obj
    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.
        """
        assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
        obj = self._clone()
        obj.query.clear_ordering(force_empty=False)
        obj.query.add_ordering(*field_names)
        return obj
    def distinct(self, *field_names):
        """
        Returns a new QuerySet instance that will select only distinct results.
        """
        assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
        obj = self._clone()
        obj.query.add_distinct_fields(*field_names)
        return obj
    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Adds extra SQL fragments to the query.
        """
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
        clone = self._clone()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
    def reverse(self):
        """
        Reverses the ordering of the QuerySet.
        """
        clone = self._clone()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone
    def defer(self, *fields):
        """
        Defers the loading of data for certain fields until they are accessed.
        The set of fields to defer is added to any existing set of deferred
        fields. The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed (None acts as a
        reset option).
        """
        clone = self._clone()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone
    def only(self, *fields):
        """
        Essentially, the opposite of defer. Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        clone = self._clone()
        clone.query.add_immediate_loading(fields)
        return clone
    def using(self, alias):
        """
        Selects which database this QuerySet should execute its query against.
        """
        clone = self._clone()
        clone._db = alias
        return clone
    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################
    def ordered(self):
        """
        Returns True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model.
        """
        if self.query.extra_order_by or self.query.order_by:
            return True
        elif self.query.default_ordering and self.query.get_meta().ordering:
            return True
        else:
            return False
    ordered = property(ordered)
    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        if self._for_write:
            return self._db or router.db_for_write(self.model, **self._hints)
        return self._db or router.db_for_read(self.model, **self._hints)
    ###################
    # PRIVATE METHODS #
    ###################
    def _insert(self, objs, fields, return_id=False, raw=False, using=None):
        """
        Inserts a new record for the given model. This provides an interface to
        the InsertQuery class and is how Model.save() is implemented.
        """
        self._for_write = True
        if using is None:
            using = self.db
        query = sql.InsertQuery(self.model)
        query.insert_values(fields, objs, raw=raw)
        return query.get_compiler(using=using).execute_sql(return_id)
    _insert.alters_data = True
    _insert.queryset_only = False
    def _batched_insert(self, objs, fields, batch_size):
        """
        A little helper method for bulk_insert to insert the bulk one batch
        at a time. Inserts recursively a batch from the front of the bulk and
        then _batched_insert() the remaining objects again.
        """
        if not objs:
            return
        ops = connections[self.db].ops
        batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
        for batch in [objs[i:i + batch_size]
                      for i in range(0, len(objs), batch_size)]:
            self.model._base_manager._insert(batch, fields=fields,
                                             using=self.db)
    def _clone(self, klass=None, setup=False, **kwargs):
        if klass is None:
            klass = self.__class__
        elif not issubclass(self.__class__, klass):
            # Cloning into a specialized queryset (e.g. values()): build a
            # dynamic subclass combining the target with this base class so
            # custom queryset methods survive; see also __reduce__.
            base_queryset_class = getattr(self, '_base_queryset_class', self.__class__)
            class_bases = (klass, base_queryset_class)
            class_dict = {
                '_base_queryset_class': base_queryset_class,
                '_specialized_queryset_class': klass,
            }
            klass = type(klass.__name__, class_bases, class_dict)
        query = self.query.clone()
        if self._sticky_filter:
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query, using=self._db, hints=self._hints)
        c._for_write = self._for_write
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        c._known_related_objects = self._known_related_objects
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
    def _fetch_all(self):
        # Evaluate the query once and run prefetches; subsequent calls no-op.
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()
    def _next_is_sticky(self):
        """
        Indicates that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes to
        determining when to reuse tables for many-to-many filters. Required so
        that we can filter naturally on the results of related managers.

        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be immediately
        followed by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self
    def _merge_sanity_check(self, other):
        """
        Checks that we are merging two comparable QuerySet classes. By default
        this does nothing, but see the ValuesQuerySet for an example of where
        it's useful.
        """
        pass
    def _merge_known_related_objects(self, other):
        """
        Keep track of all known related objects from either QuerySet instance.
        """
        for field, objects in other._known_related_objects.items():
            self._known_related_objects.setdefault(field, {}).update(objects)
    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate annotations.
        """
        opts = self.model._meta
        if self.query.group_by is None:
            field_names = [f.attname for f in opts.concrete_fields]
            self.query.add_fields(field_names, False)
            self.query.set_group_by()
    def _prepare(self):
        return self
    def _as_sql(self, connection):
        """
        Returns the internal query's SQL and parameters (as a tuple).
        """
        obj = self.values("pk")
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")
    # When used as part of a nested query, a queryset will never be an "always
    # empty" result.
    value_annotation = True
    def _add_hints(self, **hints):
        """
        Update hinting information for later use by Routers
        """
        # If there is any hinting information, add it to what we already know.
        # If we have a new hint for an existing key, overwrite with the new value.
        self._hints.update(hints)
    def _has_filters(self):
        """
        Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent for checking if all objects are present in results,
        for example qs[1:]._has_filters() -> False.
        """
        return self.query.has_filters()
class InstanceCheckMeta(type):
    """Metaclass whose isinstance() check reports queryset emptiness."""
    def __instancecheck__(self, instance):
        # A queryset "is an" EmptyQuerySet exactly when its query is empty.
        query = instance.query
        return query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
    """
    Marker class usable for checking if a queryset is empty by .none():
        isinstance(qs.none(), EmptyQuerySet) -> True
    """
    # Pure marker: the metaclass makes isinstance() delegate to
    # instance.query.is_empty(), so no instances are ever needed.
    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")
class ValuesQuerySet(QuerySet):
    def __init__(self, *args, **kwargs):
        """Initialize like QuerySet, then disable select_related."""
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME -#3358)
        self.query.select_related = False
        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.
    def only(self, *fields):
        # only()/defer() make no sense once the selected columns are fixed.
        raise NotImplementedError("ValuesQuerySet does not implement only()")
    def defer(self, *fields):
        # only()/defer() make no sense once the selected columns are fixed.
        raise NotImplementedError("ValuesQuerySet does not implement defer()")
def iterator(self):
# Purge any extra columns that haven't been explicitly asked for
extra_names = list(self.query.extra_select)
field_names = self.field_names
aggregate_names = list(self.query.aggregate_select)
names = extra_names + field_names + aggregate_names
for row in self.query.get_compiler(self.db).results_iter():
yield dict(zip(names, row))
    def delete(self):
        # values().delete() doesn't work currently - make sure it raises an
        # user friendly error.
        raise TypeError("Queries with .values() or .values_list() applied "
                        "can't be deleted")
def _setup_query(self):
"""
Constructs the field_names list that the values query will be
retrieving.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query.clear_select_fields()
if self._fields:
self.extra_names = []
self.aggregate_names = []
if not self.query._extra and not self.query._aggregates:
# Short cut - if there are no extra or aggregates, then
# the values() clause must be just field names.
self.field_names = list(self._fields)
else:
self.query.default_cols = False
self.field_names = []
for f in self._fields:
# we inspect the full extra_select list since we might
# be adding back an extra select item that we hadn't
# had selected previously.
if self.query._extra and f in self.query._extra:
self.extra_names.append(f)
elif f in self.query.aggregate_select:
self.aggregate_names.append(f)
else:
self.field_names.append(f)
else:
# Default to all fields.
self.extra_names = None
self.field_names = [f.attname for f in self.model._meta.concrete_fields]
self.aggregate_names = None
self.query.select = []
if self.extra_names is not None:
self.query.set_extra_mask(self.extra_names)
self.query.add_fields(self.field_names, True)
if self.aggregate_names is not None:
self.query.set_aggregate_mask(self.aggregate_names)
def _clone(self, klass=None, setup=False, **kwargs):
"""
Cloning a ValuesQuerySet preserves the current fields.
"""
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
if not hasattr(c, '_fields'):
# Only clone self._fields if _fields wasn't passed into the cloning
# call directly.
c._fields = self._fields[:]
c.field_names = self.field_names
c.extra_names = self.extra_names
c.aggregate_names = self.aggregate_names
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _merge_sanity_check(self, other):
super(ValuesQuerySet, self)._merge_sanity_check(other)
if (set(self.extra_names) != set(other.extra_names) or
set(self.field_names) != set(other.field_names) or
self.aggregate_names != other.aggregate_names):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
self.query.set_group_by()
if self.aggregate_names is not None:
self.aggregate_names.extend(aggregates)
self.query.set_aggregate_mask(self.aggregate_names)
super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
"""
For ValuesQuerySet (and subclasses like ValuesListQuerySet), they can
only be used as nested queries if they're already set up to select only
a single field (in which case, that is the field column that is
returned). This differs from QuerySet.as_sql(), where the column to
select is set up by Django.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
obj = self._clone()
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
"""
Validates that we aren't trying to do a query like
value__in=qs.values('value1', 'value2'), which isn't valid.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
return self
class ValuesListQuerySet(ValuesQuerySet):
    """
    QuerySet subclass backing .values_list(): iterates as tuples of the
    selected values (or as bare values when flat=True with a single field).
    """
    def iterator(self):
        if self.flat and len(self._fields) == 1:
            # flat=True with exactly one field: yield bare values.
            for row in self.query.get_compiler(self.db).results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.aggregate_select:
            # No extras/aggregates: row order already matches the request.
            for row in self.query.get_compiler(self.db).results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = list(self.query.extra_select)
            field_names = self.field_names
            aggregate_names = list(self.query.aggregate_select)
            names = extra_names + field_names + aggregate_names
            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and aggregates.
            if self._fields:
                fields = list(self._fields) + [f for f in aggregate_names if f not in self._fields]
            else:
                fields = names
            for row in self.query.get_compiler(self.db).results_iter():
                data = dict(zip(names, row))
                yield tuple(data[f] for f in fields)
    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        if not hasattr(clone, "flat"):
            # Only assign flat if the clone didn't already get it from kwargs
            clone.flat = self.flat
        return clone
class DateQuerySet(QuerySet):
    """QuerySet yielding distinct dates; backs the .dates() method."""
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()
    def _setup_query(self):
        """
        Sets up any special features of the query attribute.
        Called by the _clone() method after initializing the rest of the
        instance.
        """
        # Order matters here: deferred loading must be cleared before the
        # clone, and the select list reset before add_select() populates it.
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        self.query.add_select(self._field_name, self._kind, self._order)
    def _clone(self, klass=None, setup=False, **kwargs):
        # Force setup=False on the parent clone so _setup_query() only runs
        # after the date-specific attributes below have been copied.
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
class DateTimeQuerySet(QuerySet):
    """QuerySet yielding distinct datetimes; backs the .datetimes() method."""
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()
    def _setup_query(self):
        """
        Sets up any special features of the query attribute.
        Called by the _clone() method after initializing the rest of the
        instance.
        """
        # Order matters here: deferred loading must be cleared before the
        # clone, and the select list reset before add_select() populates it.
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateTimeQuery, setup=True, tzinfo=self._tzinfo)
        self.query.select = []
        self.query.add_select(self._field_name, self._kind, self._order)
    def _clone(self, klass=None, setup=False, **kwargs):
        # Force setup=False on the parent clone so _setup_query() only runs
        # after the datetime-specific attributes below have been copied.
        c = super(DateTimeQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        c._tzinfo = self._tzinfo
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
                   only_load=None, from_parent=None):
    """
    Helper function that recursively returns an information for a klass, to be
    used in get_cached_row.  It exists just to compute this information only
    once for entire queryset. Otherwise it would be computed for each row, which
    leads to poor performance on large querysets.

    Arguments:
     * klass - the class to retrieve (and instantiate)
     * max_depth - the maximum depth to which a select_related()
       relationship should be explored.
     * cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
     * requested - A dictionary describing the select_related() tree
       that is to be retrieved. keys are field names; values are
       dictionaries describing the keys on that related object that
       are themselves to be select_related().
     * only_load - if the query has had only() or defer() applied,
       this is the list of field names that will be returned. If None,
       the full field list for `klass` can be assumed.
     * from_parent - the parent model used to get to this model

    Note that when travelling from parent to child, we will only load child
    fields which aren't in the parent.

    Returns a tuple
    (klass, field_names, field_count, related_fields, reverse_related_fields,
    pk_idx) consumed by get_cached_row().
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None
    if only_load:
        load_fields = only_load.get(klass) or set()
        # When we create the object, we will also be creating populating
        # all the parent classes, so traverse the parent classes looking
        # for fields that must be included on load.
        for parent in klass._meta.get_parent_list():
            fields = only_load.get(parent)
            if fields:
                load_fields.update(fields)
    else:
        load_fields = None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # Build the list of fields that *haven't* been requested
        for field, model in klass._meta.get_concrete_fields_with_model():
            if from_parent and model and issubclass(from_parent, model):
                # Avoid loading fields already loaded for parent model for
                # child models.
                continue
            elif field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        # Retrieve all the requested fields
        field_count = len(init_list)
        if skip:
            klass = deferred_class_factory(klass, skip)
            field_names = init_list
        else:
            field_names = ()
    else:
        # Load all fields on klass
        field_count = len(klass._meta.concrete_fields)
        # Check if we need to skip some parent fields.
        if from_parent and len(klass._meta.local_concrete_fields) != len(klass._meta.concrete_fields):
            # Only load those fields which haven't been already loaded into
            # 'from_parent'.
            non_seen_models = [p for p in klass._meta.get_parent_list()
                               if not issubclass(from_parent, p)]
            # Load local fields, too...
            non_seen_models.append(klass)
            field_names = [f.attname for f in klass._meta.concrete_fields
                           if f.model in non_seen_models]
            field_count = len(field_names)
        # Try to avoid populating field_names variable for performance reasons.
        # If field_names variable is set, we use **kwargs based model init
        # which is slower than normal init.
        if field_count == len(klass._meta.concrete_fields):
            field_names = ()
    restricted = requested is not None
    related_fields = []
    for f in klass._meta.fields:
        if select_related_descend(f, restricted, requested, load_fields):
            if restricted:
                # Renamed from `next`: avoid shadowing the builtin next().
                next_requested = requested[f.name]
            else:
                next_requested = None
            klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth + 1,
                                        requested=next_requested, only_load=only_load)
            related_fields.append((f, klass_info))
    reverse_related_fields = []
    if restricted:
        for o in klass._meta.get_all_related_objects():
            if o.field.unique and select_related_descend(o.field, restricted, requested,
                                                         only_load.get(o.model), reverse=True):
                next_requested = requested[o.field.related_query_name()]
                parent = klass if issubclass(o.model, klass) else None
                klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth + 1,
                                            requested=next_requested, only_load=only_load,
                                            from_parent=parent)
                reverse_related_fields.append((o.field, klass_info))
    if field_names:
        pk_idx = field_names.index(klass._meta.pk.attname)
    else:
        pk_idx = klass._meta.pk_index()
    return klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx
def get_cached_row(row, index_start, using, klass_info, offset=0,
                   parent_data=()):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    This method may be called recursively to populate deep select_related()
    clauses.

    Arguments:
     * row - the row of data returned by the database cursor
     * index_start - the index of the row at which data for this
       object is known to start
     * offset - the number of additional fields that are known to
       exist in row for `klass`. This usually means the number of
       annotated results on `klass`.
     * using - the database alias on which the query is being executed.
     * klass_info - result of the get_klass_info function
     * parent_data - parent model data in format (field, value). Used
       to populate the non-local fields of child models.

    Returns (instance or None, index just past this object's columns),
    or None when klass_info is None (recursion cut-off).
    """
    if klass_info is None:
        return None
    klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx = klass_info
    fields = row[index_start:index_start + field_count]
    # If the pk column is None (or the equivalent '' in the case the
    # connection interprets empty strings as nulls), then the related
    # object must be non-existent - set the relation to None.
    if (fields[pk_idx] is None or
            (connections[using].features.interprets_empty_strings_as_nulls and
             fields[pk_idx] == '')):
        obj = None
    elif field_names:
        # Deferred/partial load: build with **kwargs, appending any values
        # inherited from the parent model.
        fields = list(fields)
        for rel_field, value in parent_data:
            field_names.append(rel_field.attname)
            fields.append(value)
        obj = klass(**dict(zip(field_names, fields)))
    else:
        # Full field list in declared order: the faster *args init.
        obj = klass(*fields)
    # If an object was retrieved, set the database state.
    if obj:
        obj._state.db = using
        obj._state.adding = False
    # Instantiate related fields
    index_end = index_start + field_count + offset
    # Iterate over each related object, populating any
    # select_related() fields
    for f, klass_info in related_fields:
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(row, index_end, using, klass_info)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the base object exists, populate the
                # descriptor cache
                setattr(obj, f.get_cache_name(), rel_obj)
            if f.unique and rel_obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache on the related object
                setattr(rel_obj, f.related.get_cache_name(), obj)
    # Now do the same, but for reverse related objects.
    # Only handle the restricted case - i.e., don't do a depth
    # descent into reverse relations unless explicitly requested
    for f, klass_info in reverse_related_fields:
        # Transfer data from this object to childs.
        parent_data = []
        for rel_field, rel_model in klass_info[0]._meta.get_fields_with_model():
            if rel_model is not None and isinstance(obj, rel_model):
                parent_data.append((rel_field, getattr(obj, rel_field.attname)))
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(row, index_end, using, klass_info,
                                    parent_data=parent_data)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # populate the reverse descriptor cache
                setattr(obj, f.related.get_cache_name(), rel_obj)
            if rel_obj is not None:
                # If the related object exists, populate
                # the descriptor cache.
                setattr(rel_obj, f.get_cache_name(), obj)
                # Populate related object caches using parent data.
                for rel_field, _ in parent_data:
                    if rel_field.rel:
                        setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
                        try:
                            cached_obj = getattr(obj, rel_field.get_cache_name())
                            setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
                        except AttributeError:
                            # Related object hasn't been cached yet
                            pass
    return obj, index_end
class RawQuerySet(object):
    """
    Provides an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
                 translations=None, using=None, hints=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        self.translations = translations or {}
    def __iter__(self):
        """Yield model instances built from the raw query's result rows."""
        # Mapping of attrnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all model's fields are present
        # in the query.
        model_init_field_names = {}
        # A list of tuples of (column name, column position). Used for
        # annotation fields.
        annotation_fields = []
        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        need_resolv_columns = hasattr(compiler, 'resolve_columns')
        query = iter(self.query)
        try:
            # Find out which columns are model's fields, and which ones should be
            # annotated to the model.
            for pos, column in enumerate(self.columns):
                if column in self.model_fields:
                    model_init_field_names[self.model_fields[column].attname] = pos
                else:
                    annotation_fields.append((column, pos))
            # Find out which model's fields are not present in the query.
            skip = set()
            for field in self.model._meta.fields:
                if field.attname not in model_init_field_names:
                    skip.add(field.attname)
            if skip:
                if self.model._meta.pk.attname in skip:
                    raise InvalidQuery('Raw query must include the primary key')
                model_cls = deferred_class_factory(self.model, skip)
            else:
                model_cls = self.model
                # All model's fields are present in the query. So, it is possible
                # to use *args based model instantiation. For each field of the model,
                # record the query column position matching that field.
                model_init_field_pos = []
                for field in self.model._meta.fields:
                    model_init_field_pos.append(model_init_field_names[field.attname])
            if need_resolv_columns:
                fields = [self.model_fields.get(c, None) for c in self.columns]
            # Begin looping through the query values.
            for values in query:
                if need_resolv_columns:
                    values = compiler.resolve_columns(values, fields)
                # Associate fields to values
                if skip:
                    # Partial field set: slower **kwargs-based init on the
                    # deferred class.
                    model_init_kwargs = {}
                    for attname, pos in six.iteritems(model_init_field_names):
                        model_init_kwargs[attname] = values[pos]
                    instance = model_cls(**model_init_kwargs)
                else:
                    model_init_args = [values[pos] for pos in model_init_field_pos]
                    instance = model_cls(*model_init_args)
                if annotation_fields:
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])
                instance._state.db = db
                instance._state.adding = False
                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(self.query, 'cursor') and self.query.cursor:
                self.query.cursor.close()
    def __repr__(self):
        # Interpolate params so the repr shows the final SQL text.
        text = self.raw_query
        if self.params:
            text = text % (self.params if hasattr(self.params, 'keys') else tuple(self.params))
        return "<RawQuerySet: %r>" % text
    def __getitem__(self, k):
        # Indexing/slicing fully evaluates the query.
        return list(self)[k]
    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model, **self._hints)
    def using(self, alias):
        """
        Selects which database this Raw QuerySet should execute its query against.
        """
        return RawQuerySet(self.raw_query, model=self.model,
                           query=self.query.clone(using=alias),
                           params=self.params, translations=self.translations,
                           using=alias)
    @property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.  Computed lazily and cached on first access.
        """
        if not hasattr(self, '_columns'):
            self._columns = self.query.get_columns()
            # Adjust any column names which don't match field names
            for (query_name, model_name) in self.translations.items():
                try:
                    index = self._columns.index(query_name)
                    self._columns[index] = model_name
                except ValueError:
                    # Ignore translations for non-existent column names
                    pass
        return self._columns
    @property
    def model_fields(self):
        """
        A dict mapping column names to model field names.  Computed lazily
        and cached on first access.
        """
        if not hasattr(self, '_model_fields'):
            converter = connections[self.db].introspection.table_name_converter
            self._model_fields = {}
            for field in self.model._meta.fields:
                name, column = field.get_attname_column()
                self._model_fields[converter(column)] = field
        return self._model_fields
class Prefetch(object):
    """
    Describe a prefetch_related() lookup, optionally carrying a custom
    queryset and a `to_attr` name under which the results are stored.
    """
    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result.
        self.prefetch_to = lookup
        if to_attr:
            # Results are stored under to_attr at the final level instead of
            # the default related name.
            self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
        self.queryset = queryset
        self.to_attr = to_attr
    def add_prefix(self, prefix):
        """Prepend `prefix` (a lookup path) to both traversal and storage paths."""
        self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
        self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
    def get_current_prefetch_through(self, level):
        """Return the traversal path truncated to `level` + 1 components."""
        return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
    def get_current_prefetch_to(self, level):
        """Return the storage path truncated to `level` + 1 components."""
        return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
    def get_current_to_attr(self, level):
        """Return (attribute name at `level`, whether it is a to_attr leaf)."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr
    def get_current_queryset(self, level):
        """Return the custom queryset, but only at the final (leaf) level."""
        if self.get_current_prefetch_to(level) == self.prefetch_to:
            return self.queryset
        return None
    def __eq__(self, other):
        if isinstance(other, Prefetch):
            # Identity for dedup purposes is the storage path only.
            return self.prefetch_to == other.prefetch_to
        # BUG FIX: return NotImplemented (not False) for foreign types so
        # Python can try the reflected comparison; the observable result for
        # existing comparisons is unchanged.
        return NotImplemented
    def __hash__(self):
        return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
    """
    Coerce each entry of `lookups` into a Prefetch object, optionally
    prepending `prefix` to its lookup paths, and return the new list.
    """
    normalized = []
    for item in lookups:
        prefetch = item if isinstance(item, Prefetch) else Prefetch(item)
        if prefix:
            prefetch.add_prefix(prefix)
        normalized.append(prefetch)
    return normalized
def prefetch_related_objects(result_cache, related_lookups):
    """
    Helper function for prefetch_related functionality

    Populates prefetched objects caches for a list of results
    from a QuerySet

    Mutates the instances in `result_cache` in place; returns nothing.
    """
    if len(result_cache) == 0:
        return  # nothing to do
    related_lookups = normalize_prefetch_lookups(related_lookups)
    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below).  So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {}    # dictionary of things like 'foo__bar': [results]
    auto_lookups = set()  # we add to this as we go through.
    followed_descriptors = set()  # recursion protection
    all_lookups = deque(related_lookups)
    while all_lookups:
        lookup = all_lookups.popleft()
        if lookup.prefetch_to in done_queries:
            if lookup.queryset:
                # A custom queryset for an already-processed path would be
                # silently ignored - make that an explicit error instead.
                raise ValueError("'%s' lookup was already seen with a different queryset. "
                                 "You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
            continue
        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = result_cache
        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if len(obj_list) == 0:
                break
            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue
            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except AttributeError:
                        # Must be in a QuerySet subclass that is not returning
                        # Model instances, either in Django or 3rd
                        # party. prefetch_related() doesn't make sense, so quit
                        # now.
                        good_objects = False
                        break
            if not good_objects:
                break
            # Descend down tree
            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to an item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup.prefetch_through)
            if prefetcher is not None and not is_fetched:
                obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (lookup in auto_lookups and descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
                    auto_lookups.update(new_lookups)
                    all_lookups.extendleft(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.
                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    try:
                        new_obj = getattr(obj, through_attr)
                    except exceptions.ObjectDoesNotExist:
                        continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
def get_prefetcher(instance, attr):
    """
    For the attribute 'attr' on the given instance, find an object that
    has a get_prefetch_queryset() method.

    Returns a 4-tuple:
     (the object with get_prefetch_queryset (or None),
      the descriptor object representing this relationship (or None),
      a boolean that is False if the attribute was not found at all,
      a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False
    # Probe the class first: reading the attribute off the instance would
    # trigger the descriptor and thus the database query we want to avoid.
    rel_obj_descriptor = getattr(instance.__class__, attr, None)
    if rel_obj_descriptor is None:
        attr_found = hasattr(instance, attr)
    else:
        attr_found = True
    if rel_obj_descriptor:
        if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
            # Singly related object: the descriptor itself knows how to
            # prefetch, and can tell us whether the value is cached already.
            prefetcher = rel_obj_descriptor
            is_fetched = bool(rel_obj_descriptor.is_cached(instance))
        else:
            # The descriptor doesn't support prefetching; fall back to the
            # instance attribute to support many-related managers.
            rel_obj = getattr(instance, attr)
            if hasattr(rel_obj, 'get_prefetch_queryset'):
                prefetcher = rel_obj
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects

    Runs prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    The prefetched objects are returned, along with any additional
    prefetches that must be done due to prefetch_related lookups
    found from default managers.
    """
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:
    # (queryset of instances of self.model that are related to passed in instances,
    #  callable that gets value to be matched for returned instances,
    #  callable that gets value to be matched for passed in instances,
    #  boolean that is True for singly related objects,
    #  cache name to assign to).
    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.
    rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
        prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    additional_lookups = getattr(rel_qs, '_prefetch_related_lookups', [])
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = []
    all_related_objects = list(rel_qs)
    # Bucket the fetched objects by the value they will be matched on.
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])
        to_attr, as_attr = lookup.get_current_to_attr(level)
        if single:
            # Single related object: store the object (or None) directly.
            val = vals[0] if vals else None
            to_attr = to_attr if as_attr else cache_name
            setattr(obj, to_attr, val)
        else:
            if as_attr:
                # to_attr was requested: store the plain list of results.
                setattr(obj, to_attr, vals)
            else:
                # Cache in the QuerySet.all().
                qs = getattr(obj, to_attr).all()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
| bsd-3-clause |
Patreon/cartographer | cartographer/field_types/schema_relationship.py | 1 | 4047 | from cartographer.resources import get_resource_registry
from cartographer.resources.resource_registry import ResourceRegistryKeys
class SchemaRelationship(object):
    """
    `SchemaRelationship` describes how to translate related resources to and from JSON API and our Python models.

    `SchemaRelationship` has one primary method,
    `related_serializer`, for creating a `JSONAPISerializer` instance based on its input arguments.
    Subclasses of `SchemaSerializer` can override this method
    to customize serialization behavior.

    Parsing of related resources is not currently handled by this class,
    and instead is handled by the `PostedDocument` class (or, more typically, its subclass `SchemaParser`).
    """
    def __init__(self, model_type, id_attribute=None, model_property=None,
                 model_method=None, serializer_method=None, includes=None):
        """
        NOTE: only one of id_attribute, model_property, model_method, or serializer_method should be provided

        :param model_type: the JSON API `type` string for the related model
        :param id_attribute: the foreign key column on the parent serializer model which identifies the related serializer
        :param model_property: the property on the parent serializer model which returns the related serializer
        :param model_method: the method on the parent serializer model which returns the related serializer
        :param serializer_method: the name of the method on the parent serializer object which uses this schema
        which should be called to get the child serializer.
        :param includes: related resources to side-load when serializing
        :raises ValueError: if more than one identifier argument is provided
        :return: an instance of SchemaRelationship,
        which will later be used to serialize Python into JSON API.
        """
        identifier_names = ("id_attribute", "model_property",
                            "model_method", "serializer_method")
        identifier_values = (id_attribute, model_property,
                             model_method, serializer_method)
        provided_identifiers = [name
                                for name, value in zip(identifier_names, identifier_values)
                                if value]
        if len(provided_identifiers) > 1:
            # BUG FIX: the original called `identifier_args.join(", ")`, which
            # raises AttributeError (lists have no .join method) instead of
            # the intended message.  Join on the separator string and report
            # the names of the conflicting arguments.  ValueError subclasses
            # Exception, so existing `except Exception` callers still work.
            raise ValueError("only one of [{}] should be provided".format(
                ", ".join(provided_identifiers)))
        self.model_type = model_type
        self.id_attribute = id_attribute
        self.model_property = model_property
        self.model_method = model_method
        self.serializer_method = serializer_method
        self.includes = includes
    def related_serializer(self, parent_serializer, relationship_key):
        """
        :param parent_serializer: The serializer which has our return value as a related resource
        :param relationship_key: The name by which the parent serializer knows this child
        :return: The child serializer which will later be used to serialize a related resource
        """
        if self.serializer_method is not None:
            return getattr(parent_serializer, self.serializer_method)()
        # Resolve the related model via whichever single identifier was given.
        model = None
        if self.id_attribute is not None:
            related_model_getter = self.resource_registry_entry().get(ResourceRegistryKeys.MODEL_GET)
            model_id = getattr(parent_serializer.model, self.id_attribute)
            if model_id is not None and related_model_getter is not None:
                model = related_model_getter(model_id)
        elif self.model_property is not None:
            model = getattr(parent_serializer.model, self.model_property)
        elif self.model_method is not None:
            model = getattr(parent_serializer.model, self.model_method)()
        if model:
            serializer_class = self.resource_registry_entry().get(ResourceRegistryKeys.SERIALIZER)
            return serializer_class(
                model,
                parent_serializer=parent_serializer,
                relationship_name=relationship_key,
                includes=self.includes
            )
        else:
            # No related model: serialize as a JSON API null relationship.
            from cartographer.serializers import JSONAPINullSerializer
            return JSONAPINullSerializer()
    def resource_registry_entry(self):
        """Return the registry entry for this relationship's model type ({} if absent)."""
        return get_resource_registry().get(self.model_type, {})
| apache-2.0 |
andresgz/django | tests/raw_query/tests.py | 119 | 12624 | from __future__ import unicode_literals
from datetime import date
from django.db.models.query_utils import InvalidQuery
from django.test import TestCase, skipUnlessDBFeature
from .models import Author, Book, BookFkAsPk, Coffee, FriendlyAuthor, Reviewer
class RawQueryTests(TestCase):
    """
    Tests for Manager.raw(): executing raw SQL and mapping the resulting
    rows back onto model instances.
    """

    @classmethod
    def setUpTestData(cls):
        cls.a1 = Author.objects.create(first_name='Joe', last_name='Smith', dob=date(1950, 9, 20))
        cls.a2 = Author.objects.create(first_name='Jill', last_name='Doe', dob=date(1920, 4, 2))
        cls.a3 = Author.objects.create(first_name='Bob', last_name='Smith', dob=date(1986, 1, 25))
        cls.a4 = Author.objects.create(first_name='Bill', last_name='Jones', dob=date(1932, 5, 10))
        cls.b1 = Book.objects.create(
            title='The awesome book', author=cls.a1, paperback=False,
            opening_line='It was a bright cold day in April and the clocks were striking thirteen.',
        )
        cls.b2 = Book.objects.create(
            title='The horrible book', author=cls.a1, paperback=True,
            opening_line=(
                'On an evening in the latter part of May a middle-aged man '
                'was walking homeward from Shaston to the village of Marlott, '
                'in the adjoining Vale of Blakemore, or Blackmoor.'
            ),
        )
        cls.b3 = Book.objects.create(
            title='Another awesome book', author=cls.a1, paperback=False,
            opening_line='A squat grey building of only thirty-four stories.',
        )
        cls.b4 = Book.objects.create(
            title='Some other book', author=cls.a3, paperback=True,
            opening_line='It was the day my grandmother exploded.',
        )
        cls.c1 = Coffee.objects.create(brand='dunkin doughnuts')
        cls.c2 = Coffee.objects.create(brand='starbucks')
        cls.r1 = Reviewer.objects.create()
        cls.r2 = Reviewer.objects.create()
        cls.r1.reviewed.add(cls.b2, cls.b3, cls.b4)

    # `params` defaults to an immutable empty tuple to avoid the shared
    # mutable-default-argument pitfall (the old default was `[]`).
    def assertSuccessfulRawQuery(self, model, query, expected_results,
                                 expected_annotations=(), params=(), translations=None):
        """
        Execute the passed query against the passed model and check the output
        """
        results = list(model.objects.raw(query, params=params, translations=translations))
        self.assertProcessed(model, results, expected_results, expected_annotations)
        self.assertAnnotations(results, expected_annotations)

    def assertProcessed(self, model, results, orig, expected_annotations=()):
        """
        Compare the results of a raw query against expected results
        """
        self.assertEqual(len(results), len(orig))
        for index, item in enumerate(results):
            orig_item = orig[index]
            for annotation in expected_annotations:
                setattr(orig_item, *annotation)
            for field in model._meta.fields:
                # Check that all values on the model are equal
                self.assertEqual(
                    getattr(item, field.attname),
                    getattr(orig_item, field.attname)
                )
                # This includes checking that they are the same type
                self.assertEqual(
                    type(getattr(item, field.attname)),
                    type(getattr(orig_item, field.attname))
                )

    def assertNoAnnotations(self, results):
        """
        Check that the results of a raw query contain no annotations
        """
        self.assertAnnotations(results, ())

    def assertAnnotations(self, results, expected_annotations):
        """
        Check that the passed raw query results contain the expected
        annotations
        """
        if expected_annotations:
            for index, result in enumerate(results):
                annotation, value = expected_annotations[index]
                self.assertTrue(hasattr(result, annotation))
                self.assertEqual(getattr(result, annotation), value)

    def test_simple_raw_query(self):
        """
        Basic test of raw query with a simple database query
        """
        query = "SELECT * FROM raw_query_author"
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors)

    def test_raw_query_lazy(self):
        """
        Raw queries are lazy: they aren't actually executed until they're
        iterated over.
        """
        q = Author.objects.raw('SELECT * FROM raw_query_author')
        self.assertIsNone(q.query.cursor)
        list(q)
        self.assertIsNotNone(q.query.cursor)

    def test_FK_raw_query(self):
        """
        Test of a simple raw query against a model containing a foreign key
        """
        query = "SELECT * FROM raw_query_book"
        books = Book.objects.all()
        self.assertSuccessfulRawQuery(Book, query, books)

    def test_db_column_handler(self):
        """
        Test of a simple raw query against a model containing a field with
        db_column defined.
        """
        query = "SELECT * FROM raw_query_coffee"
        coffees = Coffee.objects.all()
        self.assertSuccessfulRawQuery(Coffee, query, coffees)

    def test_order_handler(self):
        """
        Test of raw query's tolerance for columns being returned in any
        order
        """
        selects = (
            ('dob, last_name, first_name, id'),
            ('last_name, dob, first_name, id'),
            ('first_name, last_name, dob, id'),
        )
        for select in selects:
            query = "SELECT %s FROM raw_query_author" % select
            authors = Author.objects.all()
            self.assertSuccessfulRawQuery(Author, query, authors)

    def test_translations(self):
        """
        Test of raw query's optional ability to translate unexpected result
        column names to specific model fields
        """
        query = "SELECT first_name AS first, last_name AS last, dob, id FROM raw_query_author"
        translations = {'first': 'first_name', 'last': 'last_name'}
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)

    def test_params(self):
        """
        Test passing optional query parameters
        """
        query = "SELECT * FROM raw_query_author WHERE first_name = %s"
        author = Author.objects.all()[2]
        params = [author.first_name]
        qset = Author.objects.raw(query, params=params)
        results = list(qset)
        self.assertProcessed(Author, results, [author])
        self.assertNoAnnotations(results)
        self.assertEqual(len(results), 1)
        self.assertIsInstance(repr(qset), str)

    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_pyformat_params(self):
        """
        Test passing optional query parameters
        """
        query = "SELECT * FROM raw_query_author WHERE first_name = %(first)s"
        author = Author.objects.all()[2]
        params = {'first': author.first_name}
        qset = Author.objects.raw(query, params=params)
        results = list(qset)
        self.assertProcessed(Author, results, [author])
        self.assertNoAnnotations(results)
        self.assertEqual(len(results), 1)
        self.assertIsInstance(repr(qset), str)

    def test_query_representation(self):
        """
        Test representation of raw query with parameters
        """
        query = "SELECT * FROM raw_query_author WHERE last_name = %(last)s"
        qset = Author.objects.raw(query, {'last': 'foo'})
        self.assertEqual(repr(qset), "<RawQuerySet: SELECT * FROM raw_query_author WHERE last_name = foo>")
        self.assertEqual(repr(qset.query), "<RawQuery: SELECT * FROM raw_query_author WHERE last_name = foo>")

        # Here the params are given as a set; only the repr is checked, the
        # query is never executed.
        query = "SELECT * FROM raw_query_author WHERE last_name = %s"
        qset = Author.objects.raw(query, {'foo'})
        self.assertEqual(repr(qset), "<RawQuerySet: SELECT * FROM raw_query_author WHERE last_name = foo>")
        self.assertEqual(repr(qset.query), "<RawQuery: SELECT * FROM raw_query_author WHERE last_name = foo>")

    def test_many_to_many(self):
        """
        Test of a simple raw query against a model containing a m2m field
        """
        query = "SELECT * FROM raw_query_reviewer"
        reviewers = Reviewer.objects.all()
        self.assertSuccessfulRawQuery(Reviewer, query, reviewers)

    def test_extra_conversions(self):
        """
        Test to ensure that extra translations are ignored.
        """
        query = "SELECT * FROM raw_query_author"
        translations = {'something': 'else'}
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)

    def test_missing_fields(self):
        """Fields omitted from the query are deferred, not lost."""
        query = "SELECT id, first_name, dob FROM raw_query_author"
        for author in Author.objects.raw(query):
            self.assertNotEqual(author.first_name, None)
            # last_name isn't given, but it will be retrieved on demand
            self.assertNotEqual(author.last_name, None)

    def test_missing_fields_without_PK(self):
        """A raw query without the primary key column must fail."""
        query = "SELECT first_name, dob FROM raw_query_author"
        with self.assertRaises(InvalidQuery):
            list(Author.objects.raw(query))

    def test_annotations(self):
        """Extra select columns are attached to instances as annotations."""
        query = (
            "SELECT a.*, count(b.id) as book_count "
            "FROM raw_query_author a "
            "LEFT JOIN raw_query_book b ON a.id = b.author_id "
            "GROUP BY a.id, a.first_name, a.last_name, a.dob ORDER BY a.id"
        )
        expected_annotations = (
            ('book_count', 3),
            ('book_count', 0),
            ('book_count', 1),
            ('book_count', 0),
        )
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors, expected_annotations)

    def test_white_space_query(self):
        """Leading whitespace in the SQL is tolerated."""
        query = "  SELECT * FROM raw_query_author"
        authors = Author.objects.all()
        self.assertSuccessfulRawQuery(Author, query, authors)

    def test_multiple_iterations(self):
        """A RawQuerySet can be iterated more than once."""
        query = "SELECT * FROM raw_query_author"
        normal_authors = Author.objects.all()
        raw_authors = Author.objects.raw(query)

        # First Iteration
        first_iterations = 0
        for index, raw_author in enumerate(raw_authors):
            self.assertEqual(normal_authors[index], raw_author)
            first_iterations += 1

        # Second Iteration
        second_iterations = 0
        for index, raw_author in enumerate(raw_authors):
            self.assertEqual(normal_authors[index], raw_author)
            second_iterations += 1

        self.assertEqual(first_iterations, second_iterations)

    def test_get_item(self):
        # Indexing on RawQuerySets
        query = "SELECT * FROM raw_query_author ORDER BY id ASC"
        third_author = Author.objects.raw(query)[2]
        self.assertEqual(third_author.first_name, 'Bob')
        first_two = Author.objects.raw(query)[0:2]
        self.assertEqual(len(first_two), 2)
        self.assertRaises(TypeError, lambda: Author.objects.raw(query)['test'])

    def test_inheritance(self):
        """Raw queries work against a child model using multi-table inheritance."""
        # date is the end of the Cuban Missile Crisis, I have no idea when
        # Wesley was born
        f = FriendlyAuthor.objects.create(first_name="Wesley", last_name="Chun",
            dob=date(1962, 10, 28))
        query = "SELECT * FROM raw_query_friendlyauthor"
        self.assertEqual(
            [o.pk for o in FriendlyAuthor.objects.raw(query)], [f.pk]
        )

    def test_query_count(self):
        """Iterating a raw queryset issues exactly one database query."""
        self.assertNumQueries(1, list, Author.objects.raw("SELECT * FROM raw_query_author"))

    def test_subquery_in_raw_sql(self):
        """A subquery in raw SQL must not be mistaken for a broken query."""
        try:
            list(Book.objects.raw('SELECT id FROM (SELECT * FROM raw_query_book WHERE paperback IS NOT NULL) sq'))
        except InvalidQuery:
            self.fail("Using a subquery in a RawQuerySet raised InvalidQuery")

    def test_db_column_name_is_used_in_raw_query(self):
        """
        Regression test that ensures the `column` attribute on the field is
        used to generate the list of fields included in the query, as opposed
        to the `attname`. This is important when the primary key is a
        ForeignKey field because `attname` and `column` are not necessarily the
        same.
        """
        b = BookFkAsPk.objects.create(book=self.b1)
        self.assertEqual(list(BookFkAsPk.objects.raw('SELECT not_the_default FROM raw_query_bookfkaspk')), [b])
| bsd-3-clause |
ecino/compassion-switzerland | account_reconcile_compassion/models/statement_operation.py | 3 | 1033 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class AccountOperationTemplate(models.Model):
    """Reconcile-model extension that also proposes an analytic account."""
    _inherit = 'account.reconcile.model'

    @api.model
    def product_changed(self, product_id):
        """
        Helper to get the account and analytic account in reconcile view.

        :param product_id: id of the product selected in the reconcile view
        :return: dict of values (account_id, analytic_id)
        """
        res = super(AccountOperationTemplate, self).product_changed(product_id)
        if not product_id:
            return res
        # Look up the analytic default rule matching this product.
        default_rule = self.env['account.analytic.default'].account_get(
            product_id)
        res['analytic_id'] = default_rule.analytic_id.id
        return res
| agpl-3.0 |
mdaniel/intellij-community | python/helpers/coveragepy/coverage/report.py | 47 | 3608 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Reporter foundation for coverage.py."""
import os
import warnings
from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import CoverageException, NoSource, NotPython, isolate_module
os = isolate_module(os)
class Reporter(object):
    """A base class for all reporters."""

    def __init__(self, coverage, config):
        """Create a reporter.

        `coverage` is the coverage instance. `config` is an instance of
        CoverageConfig, for controlling all sorts of behavior.

        """
        self.coverage = coverage
        self.config = config

        # Output directory used by derived classes that write report files.
        self.directory = None

        # Backwards-compatibility shim: find_file_reporters used to expose
        # its result through this attribute, and some third parties read it.
        # Keep mirroring it (noisily, via the property below).
        self._file_reporters = []

    @property
    def file_reporters(self):
        """Keep .file_reporters working for private-grabbing tools."""
        warnings.warn(
            "Report.file_reporters will no longer be available in Coverage.py 4.2",
            DeprecationWarning,
        )
        return self._file_reporters

    def find_file_reporters(self, morfs):
        """Find the FileReporters we'll report on.

        `morfs` is a list of modules or file names.

        Returns a list of FileReporters, sorted, after applying the
        configured include/omit filename patterns.

        """
        candidates = self.coverage._get_file_reporters(morfs)

        if self.config.include:
            keep = FnmatchMatcher(prep_patterns(self.config.include))
            candidates = [fr for fr in candidates if keep.match(fr.filename)]

        if self.config.omit:
            drop = FnmatchMatcher(prep_patterns(self.config.omit))
            candidates = [fr for fr in candidates if not drop.match(fr.filename)]

        self._file_reporters = sorted(candidates)
        return self._file_reporters

    def report_files(self, report_fn, morfs, directory=None):
        """Run a reporting function on a number of morfs.

        `report_fn` is called for each relative morf in `morfs`. It is called
        as::

            report_fn(file_reporter, analysis)

        where `file_reporter` is the `FileReporter` for the morf, and
        `analysis` is the `Analysis` for the morf.

        """
        reporters = self.find_file_reporters(morfs)
        if not reporters:
            raise CoverageException("No data to report.")

        self.directory = directory
        if self.directory and not os.path.exists(self.directory):
            os.makedirs(self.directory)

        for fr in reporters:
            try:
                report_fn(fr, self.coverage._analyze(fr))
            except NoSource:
                if not self.config.ignore_errors:
                    raise
            except NotPython:
                # Only report errors for .py files, and only if we didn't
                # explicitly suppress those errors.
                # NotPython is only raised by PythonFileReporter, which has a
                # should_be_python() method.
                if fr.should_be_python():
                    if self.config.ignore_errors:
                        self.coverage._warn("Could not parse Python file {0}".format(fr.filename))
                    else:
                        raise
| apache-2.0 |
avtoritet/httpie | httpie/client.py | 25 | 4019 | import json
import sys
from pprint import pformat
import requests
from requests.packages import urllib3
from httpie import sessions
from httpie import __version__
from httpie.compat import str
from httpie.plugins import plugin_manager
# https://urllib3.readthedocs.org/en/latest/security.html
urllib3.disable_warnings()
FORM = 'application/x-www-form-urlencoded; charset=utf-8'
JSON = 'application/json'
DEFAULT_UA = 'HTTPie/%s' % __version__
def get_requests_session():
    """Create a requests Session with all transport plugins mounted."""
    session = requests.Session()
    for plugin_cls in plugin_manager.get_transport_plugins():
        plugin = plugin_cls()
        session.mount(prefix=plugin.prefix,
                      adapter=plugin.get_adapter())
    return session
def get_response(args, config_dir):
    """Send the request and return a `request.Response`."""
    requests_session = get_requests_session()

    session_name = args.session or args.session_read_only
    if not session_name:
        # One-off request: no session to load or save.
        kwargs = get_requests_kwargs(args)
        if args.debug:
            dump_request(kwargs)
        return requests_session.request(**kwargs)

    # Session-backed request: delegate to the sessions module, which merges
    # stored headers/cookies/auth and persists them unless read-only.
    return sessions.get_response(
        requests_session=requests_session,
        args=args,
        config_dir=config_dir,
        session_name=session_name,
        read_only=bool(args.session_read_only),
    )
def dump_request(kwargs):
    """Print the pending request's keyword arguments to stderr (--debug)."""
    message = '\n>>> requests.request(**%s)\n\n' % pformat(kwargs)
    sys.stderr.write(message)
def encode_headers(headers):
    """Return `headers` with text values encoded to UTF-8 bytes."""
    # This allows for unicode headers which is non-standard but practical.
    # See: https://github.com/jkbrzt/httpie/issues/212
    return {
        name: value.encode('utf8') if isinstance(value, str) else value
        for name, value in headers.items()
    }
def get_default_headers(args):
    """Build the headers httpie sends unless the user overrides them."""
    headers = {
        'User-Agent': DEFAULT_UA
    }

    # JSON mode is implied when there is data and --form wasn't requested.
    implicit_json = args.data and not args.form

    # FIXME: Accept is set to JSON with `http url @./file.txt`.
    if args.json or implicit_json:
        headers['Accept'] = 'application/json'

    if args.json or (implicit_json and args.data):
        headers['Content-Type'] = JSON
    elif args.form and not args.files:
        # If sending files, `requests` will set
        # the `Content-Type` for us.
        headers['Content-Type'] = FORM

    return headers
def get_requests_kwargs(args, base_headers=None):
    """
    Translate our `args` into `requests.request` keyword arguments.

    :param args: parsed CLI arguments namespace
    :param base_headers: headers applied before ``args.headers`` (e.g. from a
        stored session); ``args.headers`` wins on conflict
    :return: dict of keyword arguments ready for ``requests.request``
    """
    # Serialize JSON data, if needed.
    data = args.data
    auto_json = data and not args.form
    if (args.json or auto_json) and isinstance(data, dict):
        if data:
            data = json.dumps(data)
        else:
            # We need to set data to an empty string to prevent requests
            # from assigning an empty list to `response.request.data`.
            data = ''
    # Finalize headers.
    # Precedence (lowest to highest): defaults, base_headers, args.headers.
    headers = get_default_headers(args)
    if base_headers:
        headers.update(base_headers)
    headers.update(args.headers)
    headers = encode_headers(headers)
    # Resolve credentials through the configured auth plugin, if any.
    credentials = None
    if args.auth:
        auth_plugin = plugin_manager.get_auth_plugin(args.auth_type)()
        credentials = auth_plugin.get_auth(args.auth.key, args.auth.value)
    # Client TLS certificate: either a single cert file or a (cert, key) pair.
    cert = None
    if args.cert:
        cert = args.cert
        if args.cert_key:
            cert = cert, args.cert_key
    kwargs = {
        'stream': True,
        'method': args.method.lower(),
        'url': args.url,
        'headers': headers,
        'data': data,
        # Map the CLI 'yes'/'no' strings to booleans; anything else (e.g. a
        # CA bundle path) is passed through to requests unchanged.
        'verify': {
            'yes': True,
            'no': False
        }.get(args.verify, args.verify),
        'cert': cert,
        'timeout': args.timeout,
        'auth': credentials,
        'proxies': dict((p.key, p.value) for p in args.proxy),
        'files': args.files,
        'allow_redirects': args.follow,
        'params': args.params,
    }
    return kwargs
| bsd-3-clause |
SamuelYvon/radish | docs/conf.py | 1 | 9715 | # -*- coding: utf-8 -*-
#
# radish documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 13 21:58:55 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# NOTE: sys/os/shlex come from the sphinx-quickstart template; they are only
# needed if the sys.path manipulation below is uncommented.
# sphinx rtd theme
# import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'radish'
copyright = u'2015, Timo Furrer'
author = u'Timo Furrer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: keep these in sync with the radish package version.
# The short X.Y version.
version = '0.5.1'
# The full version, including alpha/beta/rc tags.
release = '0.5.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "logo": "radish-bdd-logo.png",
    "logo_name": "radish",
    "github_user": "radish-bdd",
    "github_repo": "radish",
    "github_button": True,
    "github_banner": True
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/radish-bdd-logo-trans-bg.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/radish-bdd-logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'radishdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'radish.tex', u'radish Documentation',
   u'Timo Furrer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'radish', u'radish Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'radish', u'radish Documentation',
   author, 'radish', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
RyuKojiro/YouCompleteMe | python/ycm/diagnostic_interface.py | 33 | 8511 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict, namedtuple
from ycm import vimsupport
import vim
class DiagnosticInterface( object ):
  """Presents ycmd diagnostics in Vim: echoes the diagnostic on the current
  line, manages gutter signs and syntax highlights, and optionally fills the
  location list."""

  def __init__( self, user_options ):
    # User options dict controlling which presentation features are enabled.
    self._user_options = user_options
    # Line and column numbers are 1-based
    self._buffer_number_to_line_to_diags = defaultdict(
      lambda: defaultdict( list ) )
    # Next free Vim sign id; each placed sign needs a unique id.
    self._next_sign_id = 1
    # Last cursor line seen, so we only re-echo when the line changes.
    self._previous_line_number = -1
    # True when a diagnostic message is on screen and must be cleared later.
    self._diag_message_needs_clearing = False
    # Signs currently placed in Vim, kept for incremental updates.
    self._placed_signs = []

  def OnCursorMoved( self ):
    """Echo the diagnostic for the cursor line when the line has changed."""
    line, _ = vimsupport.CurrentLineAndColumn()
    line += 1  # Convert to 1-based
    if line != self._previous_line_number:
      self._previous_line_number = line
      if self._user_options[ 'echo_current_diagnostic' ]:
        self._EchoDiagnosticForLine( line )

  def UpdateWithNewDiagnostics( self, diags ):
    """Replace the stored diagnostics with `diags` and refresh signs,
    highlights and (optionally) the location list."""
    normalized_diags = [ _NormalizeDiagnostic( x ) for x in diags ]
    self._buffer_number_to_line_to_diags = _ConvertDiagListToDict(
      normalized_diags )
    if self._user_options[ 'enable_diagnostic_signs' ]:
      self._placed_signs, self._next_sign_id = _UpdateSigns(
        self._placed_signs,
        self._buffer_number_to_line_to_diags,
        self._next_sign_id )
    if self._user_options[ 'enable_diagnostic_highlighting' ]:
      _UpdateSquiggles( self._buffer_number_to_line_to_diags )
    if self._user_options[ 'always_populate_location_list' ]:
      vimsupport.SetLocationList(
        vimsupport.ConvertDiagnosticsToQfList( normalized_diags ) )

  def _EchoDiagnosticForLine( self, line_num ):
    """Echo the first diagnostic on `line_num`, or clear a stale message."""
    buffer_num = vim.current.buffer.number
    diags = self._buffer_number_to_line_to_diags[ buffer_num ][ line_num ]
    if not diags:
      if self._diag_message_needs_clearing:
        # Clear any previous diag echo
        vimsupport.EchoText( '', False )
        self._diag_message_needs_clearing = False
      return
    text = diags[ 0 ][ 'text' ]
    if diags[ 0 ].get( 'fixit_available', False ):
      # Tell the user an automatic correction (FixIt) can be applied.
      text += ' (FixIt)'
    vimsupport.EchoTextVimWidth( text )
    self._diag_message_needs_clearing = True
def _UpdateSquiggles( buffer_number_to_line_to_diags ):
  """Redraw the error/warning syntax highlights for the current buffer."""
  vimsupport.ClearYcmSyntaxMatches()
  line_to_diags = buffer_number_to_line_to_diags[ vim.current.buffer.number ]

  for diags in line_to_diags.itervalues():
    for diag in diags:
      extent = diag[ 'location_extent' ]
      is_error = _DiagnosticIsError( diag )

      if extent[ 'start' ][ 'line_num' ] < 0:
        # No usable extent; highlight just the single location point.
        point = diag[ 'location' ]
        vimsupport.AddDiagnosticSyntaxMatch(
            point[ 'line_num' ],
            point[ 'column_num' ] )
      else:
        vimsupport.AddDiagnosticSyntaxMatch(
            extent[ 'start' ][ 'line_num' ],
            extent[ 'start' ][ 'column_num' ],
            extent[ 'end' ][ 'line_num' ],
            extent[ 'end' ][ 'column_num' ],
            is_error = is_error )

      # Highlight any additional ranges the diagnostic carries.
      for diag_range in diag[ 'ranges' ]:
        vimsupport.AddDiagnosticSyntaxMatch(
            diag_range[ 'start' ][ 'line_num' ],
            diag_range[ 'start' ][ 'column_num' ],
            diag_range[ 'end' ][ 'line_num' ],
            diag_range[ 'end' ][ 'column_num' ],
            is_error = is_error )
def _UpdateSigns( placed_signs, buffer_number_to_line_to_diags, next_sign_id ):
  """Incrementally update Vim gutter signs to match the current diagnostics.

  Returns a tuple of (signs now placed, next free sign id)."""
  new_signs, kept_signs, next_sign_id = _GetKeptAndNewSigns(
    placed_signs, buffer_number_to_line_to_diags, next_sign_id
  )
  # Dummy sign used to prevent "flickering" in Vim when last mark gets
  # deleted from buffer. Dummy sign prevents Vim from collapsing the sign
  # column in that case.
  # There's also a vim bug which causes the whole window to redraw in some
  # conditions (vim redraw logic is very complex). But, somehow, if we place a
  # dummy sign before placing other "real" signs, it will not redraw the
  # buffer (patch to vim pending).
  dummy_sign_needed = not kept_signs and new_signs
  if dummy_sign_needed:
    vimsupport.PlaceDummySign( next_sign_id + 1,
                               vim.current.buffer.number,
                               new_signs[ 0 ].line )
  # We place only those signs that haven't been placed yet.
  new_placed_signs = _PlaceNewSigns( kept_signs, new_signs )
  # We use incremental placement, so signs that are already placed on the
  # correct lines will not be deleted and placed again, which should improve
  # performance in case of many diags. Signs which don't exist in the current
  # diag should be deleted.
  _UnplaceObsoleteSigns( kept_signs, placed_signs )
  if dummy_sign_needed:
    vimsupport.UnPlaceDummySign( next_sign_id + 1, vim.current.buffer.number )
  return new_placed_signs, next_sign_id
def _GetKeptAndNewSigns( placed_signs, buffer_number_to_line_to_diags,
                         next_sign_id ):
  """Split the required signs into ones already placed (kept) and new ones.

  Only visible buffers are considered.  Signs compare equal on location
  ( line, buffer, is_error ) regardless of id -- see _DiagSignPlacement.
  Returns ( new_signs, kept_signs, next_sign_id ).
  """
  new_signs = []
  kept_signs = []
  for buffer_number, line_to_diags in buffer_number_to_line_to_diags.iteritems():
    if not vimsupport.BufferIsVisible( buffer_number ):
      continue
    for line, diags in line_to_diags.iteritems():
      for diag in diags:
        # Tentatively allocate the next id; it is only consumed if the sign
        # turns out to be new.
        sign = _DiagSignPlacement( next_sign_id,
                                   line,
                                   buffer_number,
                                   _DiagnosticIsError( diag ) )
        if sign not in placed_signs:
          new_signs += [ sign ]
          next_sign_id += 1
        else:
          # We use .index here because `sign` contains a new id, but
          # we need the sign with the old id to unplace it later on.
          # We won't be placing the new sign.
          kept_signs += [ placed_signs[ placed_signs.index( sign ) ] ]
  return new_signs, kept_signs, next_sign_id
def _PlaceNewSigns( kept_signs, new_signs ):
  """Place every sign from new_signs whose location is not already occupied.

  Returns the full list of signs now placed (kept + newly placed).
  """
  placed = list( kept_signs )
  for candidate in new_signs:
    # Never put two signs on the same line; that would corrupt the stored
    # sign locations.
    if candidate in placed:
      continue
    vimsupport.PlaceSign( candidate.id,
                          candidate.line,
                          candidate.buffer,
                          candidate.is_error )
    placed.append( candidate )
  return placed
def _UnplaceObsoleteSigns( kept_signs, placed_signs ):
  """Remove previously placed signs that no longer match a current diag."""
  obsolete = ( s for s in placed_signs if s not in kept_signs )
  for sign in obsolete:
    vimsupport.UnplaceSignInBuffer( sign.buffer, sign.id )
def _ConvertDiagListToDict( diag_list ):
  """Index a flat list of diagnostics as { buffer_number: { line: [ diag ] } }.

  Within each line, diagnostics are sorted by column then kind so that
  errors come before warnings.
  """
  buffer_to_line_to_diags = defaultdict( lambda: defaultdict( list ) )
  for diag in diag_list:
    location = diag[ 'location' ]
    buffer_number = vimsupport.GetBufferNumberForFilename(
      location[ 'filepath' ] )
    line_number = location[ 'line_num' ]
    buffer_to_line_to_diags[ buffer_number ][ line_number ].append( diag )
  for line_to_diags in buffer_to_line_to_diags.itervalues():
    for diags in line_to_diags.itervalues():
      # We also want errors to be listed before warnings so that errors aren't
      # hidden by the warnings; Vim won't place a sign over an existing one.
      diags.sort( key = lambda diag: ( diag[ 'location' ][ 'column_num' ],
                                       diag[ 'kind' ] ) )
  return buffer_to_line_to_diags
def _DiagnosticIsError( diag ):
return diag[ 'kind' ] == 'ERROR'
def _NormalizeDiagnostic( diag ):
def ClampToOne( value ):
return value if value > 0 else 1
location = diag[ 'location' ]
location[ 'column_num' ] = ClampToOne( location[ 'column_num' ] )
location[ 'line_num' ] = ClampToOne( location[ 'line_num' ] )
return diag
class _DiagSignPlacement( namedtuple( "_DiagSignPlacement",
[ 'id', 'line', 'buffer', 'is_error' ] ) ):
# We want two signs that have different ids but the same location to compare
# equal. ID doesn't matter.
def __eq__( self, other ):
return ( self.line == other.line and
self.buffer == other.buffer and
self.is_error == other.is_error )
| gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/elasticsearch/helpers/test.py | 7 | 1851 | import time
import os
try:
# python 2.6
from unittest2 import TestCase, SkipTest
except ImportError:
from unittest import TestCase, SkipTest
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
def get_test_client(nowait=False, **kwargs):
    """Create an Elasticsearch client configured from the environment.

    Waits (up to ~10s, or a single attempt when *nowait* is set) for the
    cluster to reach yellow status; raises SkipTest if it never does.
    """
    # Base kwargs from the environment, overridable by the caller.
    kw = {'timeout': 30}
    connection_name = os.environ.get('TEST_ES_CONNECTION')
    if connection_name is not None:
        from elasticsearch import connection
        kw['connection_class'] = getattr(connection, connection_name)
    kw.update(kwargs)
    client = Elasticsearch([os.environ.get('TEST_ES_SERVER', {})], **kw)

    # Poll for yellow cluster health before handing the client out.
    attempts = 1 if nowait else 100
    for _ in range(attempts):
        try:
            client.cluster.health(wait_for_status='yellow')
            return client
        except ConnectionError:
            time.sleep(.1)

    # Every attempt failed -- the cluster never came up.
    raise SkipTest("Elasticsearch failed to start.")
def _get_version(version_string):
if '.' not in version_string:
return ()
version = version_string.strip().split('.')
return tuple(int(v) if v.isdigit() else 999 for v in version)
class ElasticsearchTestCase(TestCase):
    """Base TestCase that shares one live Elasticsearch client per class and
    wipes all indices and index templates after every test."""

    @staticmethod
    def _get_client():
        # Separate hook so subclasses can supply a differently-configured client.
        return get_test_client()

    @classmethod
    def setUpClass(cls):
        super(ElasticsearchTestCase, cls).setUpClass()
        cls.client = cls._get_client()

    def tearDown(self):
        super(ElasticsearchTestCase, self).tearDown()
        # Clean slate for the next test; ignore 404 when nothing exists.
        self.client.indices.delete(index='*', ignore=404)
        self.client.indices.delete_template(name='*', ignore=404)

    @property
    def es_version(self):
        # Lazily fetched from the server and cached on the instance.
        if not hasattr(self, '_es_version'):
            version_string = self.client.info()['version']['number']
            self._es_version = _get_version(version_string)
        return self._es_version
| apache-2.0 |
devincoughlin/swift | utils/incrparse/incr_transfer_tree.py | 16 | 3754 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from test_util import TestFailedError, run_command, \
serializeIncrParseMarkupFile
def main():
    """Parse CLI arguments, serialize the incremental parse of the template
    file, and diff it against the expected syntax tree, exiting non-zero on
    any failure."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Utility for testing incremental syntax tree transfer',
        epilog='''
    Based of a single template the utility generates a pre-edit and a post-edit
    file. It then verifies that the incrementally transferred syntax tree
    matches the syntax tree passed as --expected-incremental-syntax-tree.
    To generate the pre-edit and the post-edit file from the template, it
    operates on markers of the form:
        <<test_case<pre|||post>>>
    These placeholders are replaced by:
    - 'pre' if a different test case than 'test_case' is run
    - 'pre' for the pre-edit version of 'test_case'
    - 'post' for the post-edit version of 'test_case'
    ''')
    parser.add_argument(
        'file', type=argparse.FileType(),
        help='The template file to test')
    parser.add_argument(
        '--test-case', default='',
        help='The test case to execute. If no test case is specified all '
             'unnamed substitutions are applied')
    parser.add_argument(
        '--temp-dir', required=True,
        help='A temporary directory where pre-edit and post-edit files can be '
             'saved')
    parser.add_argument(
        '--swift-syntax-test', required=True,
        help='The path to swift-syntax-test')
    parser.add_argument(
        '--expected-incremental-syntax-tree', required=True,
        help='The path to a file that contains the expected incrementally '
             'transferred syntax tree')

    args = parser.parse_args(sys.argv[1:])

    test_file = args.file.name
    test_file_name = os.path.basename(test_file)
    test_case = args.test_case
    temp_dir = args.temp_dir
    swift_syntax_test = args.swift_syntax_test
    expected_syntax_tree_file = args.expected_incremental_syntax_tree

    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)

    # Per-test-case output file for the incrementally transferred tree.
    incremental_serialized_file = temp_dir + '/' + test_file_name + '.' \
        + test_case + '.incr.json'

    try:
        serializeIncrParseMarkupFile(test_file=test_file,
                                     test_case=test_case,
                                     mode='incremental',
                                     serialization_mode='incremental',
                                     serialization_format='json',
                                     omit_node_ids=False,
                                     output_file=incremental_serialized_file,
                                     temp_dir=temp_dir + '/temp',
                                     swift_syntax_test=swift_syntax_test,
                                     print_visual_reuse_info=False)
    except TestFailedError as e:
        print('Test case "%s" of %s FAILed' % (test_case, test_file),
              file=sys.stderr)
        print(e.message, file=sys.stderr)
        sys.exit(1)

    # Check if the two syntax trees are the same
    # (run_command raises CalledProcessError when diff reports differences).
    try:
        run_command(
            [
                'diff', '-u',
                incremental_serialized_file,
                expected_syntax_tree_file
            ])
    except subprocess.CalledProcessError as e:
        print('Test case "%s" of %s FAILed' % (test_case, test_file),
              file=sys.stderr)
        print('Syntax tree of incremental parsing does not match expected '
              'incrementally transfer syntax tree:\n\n', file=sys.stderr)
        print(e.output, file=sys.stderr)
        sys.exit(1)
# Entry point when invoked directly as a script.
if __name__ == '__main__':
    main()
| apache-2.0 |
gonboy/sl4a | python/src/Lib/ctypes/test/test_strings.py | 51 | 6957 | import unittest
from ctypes import *
class StringArrayTestCase(unittest.TestCase):
    """Tests for fixed-size c_char arrays (Python 2 ctypes).

    NOTE: this file is Python 2 code -- it relies on the `buffer` builtin and
    the deprecated failUnless* assertion aliases.
    """
    def test(self):
        BUF = c_char * 4

        buf = BUF("a", "b", "c")
        self.failUnlessEqual(buf.value, "abc")
        self.failUnlessEqual(buf.raw, "abc\000")

        buf.value = "ABCD"
        self.failUnlessEqual(buf.value, "ABCD")
        self.failUnlessEqual(buf.raw, "ABCD")

        buf.value = "x"
        self.failUnlessEqual(buf.value, "x")
        self.failUnlessEqual(buf.raw, "x\000CD")

        buf[1] = "Z"
        self.failUnlessEqual(buf.value, "xZCD")
        self.failUnlessEqual(buf.raw, "xZCD")

        # Over-long and non-string assignments must be rejected.
        self.assertRaises(ValueError, setattr, buf, "value", "aaaaaaaa")
        self.assertRaises(TypeError, setattr, buf, "value", 42)

    def test_c_buffer_value(self):
        buf = c_buffer(32)

        buf.value = "Hello, World"
        self.failUnlessEqual(buf.value, "Hello, World")

        # buffer objects are not accepted for .value; .raw rejects overflow.
        self.failUnlessRaises(TypeError, setattr, buf, "value", buffer("Hello, World"))
        self.assertRaises(TypeError, setattr, buf, "value", buffer("abc"))
        self.assertRaises(ValueError, setattr, buf, "raw", buffer("x" * 100))

    def test_c_buffer_raw(self):
        buf = c_buffer(32)

        buf.raw = buffer("Hello, World")
        self.failUnlessEqual(buf.value, "Hello, World")
        self.assertRaises(TypeError, setattr, buf, "value", buffer("abc"))
        self.assertRaises(ValueError, setattr, buf, "raw", buffer("x" * 100))

    def test_param_1(self):
        BUF = c_char * 4
        buf = BUF()
##        print c_char_p.from_param(buf)

    def test_param_2(self):
        BUF = c_char * 4
        buf = BUF()
##        print BUF.from_param(c_char_p("python"))
##        print BUF.from_param(BUF(*"pyth"))
# Only define the wide-char array tests when this ctypes build has c_wchar.
try:
    c_wchar
except NameError:
    pass
else:
    class WStringArrayTestCase(unittest.TestCase):
        """Tests for fixed-size c_wchar arrays (mirrors StringArrayTestCase)."""
        def test(self):
            BUF = c_wchar * 4

            buf = BUF(u"a", u"b", u"c")
            self.failUnlessEqual(buf.value, u"abc")

            buf.value = u"ABCD"
            self.failUnlessEqual(buf.value, u"ABCD")

            buf.value = u"x"
            self.failUnlessEqual(buf.value, u"x")

            buf[1] = u"Z"
            self.failUnlessEqual(buf.value, u"xZCD")
class StringTestCase(unittest.TestCase):
    """Historical c_string tests.

    NOTE: the XX_ prefix disables these methods (c_string no longer exists in
    this form); they are kept for reference only.
    """
    def XX_test_basic_strings(self):
        cs = c_string("abcdef")

        # Cannot call len on a c_string any longer
        self.assertRaises(TypeError, len, cs)
        self.failUnlessEqual(sizeof(cs), 7)

        # The value property is the string up to the first terminating NUL.
        self.failUnlessEqual(cs.value, "abcdef")
        self.failUnlessEqual(c_string("abc\000def").value, "abc")

        # The raw property is the total buffer contents:
        self.failUnlessEqual(cs.raw, "abcdef\000")
        self.failUnlessEqual(c_string("abc\000def").raw, "abc\000def\000")

        # We can change the value:
        cs.value = "ab"
        self.failUnlessEqual(cs.value, "ab")
        self.failUnlessEqual(cs.raw, "ab\000\000\000\000\000")

        cs.raw = "XY"
        self.failUnlessEqual(cs.value, "XY")
        self.failUnlessEqual(cs.raw, "XY\000\000\000\000\000")

        self.assertRaises(TypeError, c_string, u"123")

    def XX_test_sized_strings(self):
        # New in releases later than 0.4.0:
        self.assertRaises(TypeError, c_string, None)

        # New in releases later than 0.4.0:
        # c_string(number) returns an empty string of size number
        self.failUnless(len(c_string(32).raw) == 32)
        self.assertRaises(ValueError, c_string, -1)
        self.assertRaises(ValueError, c_string, 0)

        # These tests fail, because it is no longer initialized
##        self.failUnless(c_string(2).value == "")
##        self.failUnless(c_string(2).raw == "\000\000")
        self.failUnless(c_string(2).raw[-1] == "\000")
        self.failUnless(len(c_string(2).raw) == 2)

    def XX_test_initialized_strings(self):
        self.failUnless(c_string("ab", 4).raw[:2] == "ab")
        self.failUnless(c_string("ab", 4).raw[:2:] == "ab")
        self.failUnless(c_string("ab", 4).raw[:2:-1] == "ba")
        self.failUnless(c_string("ab", 4).raw[:2:2] == "a")
        self.failUnless(c_string("ab", 4).raw[-1] == "\000")
        self.failUnless(c_string("ab", 2).raw == "a\000")

    def XX_test_toolong(self):
        cs = c_string("abcdef")
        # Much too long string:
        self.assertRaises(ValueError, setattr, cs, "value", "123456789012345")

        # One char too long values:
        self.assertRaises(ValueError, setattr, cs, "value", "1234567")

##    def test_perf(self):
##        check_perf()
# Only define the wide-string tests when this ctypes build has c_wchar.
try:
    c_wchar
except NameError:
    pass
else:
    class WStringTestCase(unittest.TestCase):
        """c_wchar / c_wstring tests; X_ prefixed methods are disabled."""
        def test_wchar(self):
            c_wchar(u"x")
            repr(byref(c_wchar(u"x")))
            c_wchar("x")

        def X_test_basic_wstrings(self):
            cs = c_wstring(u"abcdef")

            # XXX This behaviour is about to change:
            # len returns the size of the internal buffer in bytes.
            # This includes the terminating NUL character.
            self.failUnless(sizeof(cs) == 14)

            # The value property is the string up to the first terminating NUL.
            self.failUnless(cs.value == u"abcdef")
            self.failUnless(c_wstring(u"abc\000def").value == u"abc")

            self.failUnless(c_wstring(u"abc\000def").value == u"abc")

            # The raw property is the total buffer contents:
            self.failUnless(cs.raw == u"abcdef\000")
            self.failUnless(c_wstring(u"abc\000def").raw == u"abc\000def\000")

            # We can change the value:
            cs.value = u"ab"
            self.failUnless(cs.value == u"ab")
            self.failUnless(cs.raw == u"ab\000\000\000\000\000")

            self.assertRaises(TypeError, c_wstring, "123")
            self.assertRaises(ValueError, c_wstring, 0)

        def X_test_toolong(self):
            cs = c_wstring(u"abcdef")
            # Much too long string:
            self.assertRaises(ValueError, setattr, cs, "value", u"123456789012345")

            # One char too long values:
            self.assertRaises(ValueError, setattr, cs, "value", u"1234567")
# Micro-benchmark helper (Python 2 only: print statement, time.clock).
# Calls func(arg) five times per iteration and reports the average call time.
def run_test(rep, msg, func, arg):
    items = range(rep)
    from time import clock
    start = clock()
    for i in items:
        func(arg); func(arg); func(arg); func(arg); func(arg)
    stop = clock()
    print "%20s: %.2f us" % (msg, ((stop-start)*1e6/5/rep))
# Manual benchmark driver; not run by default (see the disabled call in the
# __main__ guard below).  Historical results kept for comparison.
def check_perf():
    # Construct 5 objects
    REP = 200000

    run_test(REP, "c_string(None)", c_string, None)
    run_test(REP, "c_string('abc')", c_string, 'abc')

    # Python 2.3 -OO, win2k, P4 700 MHz:
    #
    #      c_string(None): 1.75 us
    #     c_string('abc'): 2.74 us

    # Python 2.2 -OO, win2k, P4 700 MHz:
    #
    #      c_string(None): 2.95 us
    #     c_string('abc'): 3.67 us
if __name__ == '__main__':
    # check_perf() is a manual micro-benchmark; deliberately left disabled.
##    check_perf()
    unittest.main()
| apache-2.0 |
roadmapper/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_shell.py | 23 | 13417 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015-16, Ritesh Khadgaray <khadgaray () gmail.com>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vm_shell
short_description: Run commands in a VMware guest operating system
description:
- Module allows user to run common system administration commands in the guest operating system.
version_added: "2.1"
author:
- Ritesh Khadgaray (@ritzk)
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 5.5, 6.0 and 6.5.
- Only the first match against vm_id is used, even if there are multiple matches.
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter:
description:
- The datacenter hosting the virtual machine.
- If set, it will help to speed up virtual machine search.
type: str
cluster:
description:
- The cluster hosting the virtual machine.
- If set, it will help to speed up virtual machine search.
type: str
folder:
description:
- Destination folder, absolute or relative path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
version_added: "2.4"
type: str
vm_id:
description:
- Name of the virtual machine to work with.
required: True
type: str
vm_id_type:
description:
- The VMware identification method by which the virtual machine will be identified.
default: vm_name
choices: ['uuid', 'instance_uuid', 'dns_name', 'inventory_path', 'vm_name']
type: str
vm_username:
description:
- The user to login-in to the virtual machine.
required: True
type: str
vm_password:
description:
- The password used to login-in to the virtual machine.
required: True
type: str
vm_shell:
description:
- The absolute path to the program to start.
- On Linux, shell is executed via bash.
required: True
type: str
vm_shell_args:
description:
- The argument to the program.
- The characters which must be escaped to the shell also be escaped on the command line provided.
default: " "
type: str
vm_shell_env:
description:
- Comma separated list of environment variable, specified in the guest OS notation.
type: list
vm_shell_cwd:
description:
- The current working directory of the application from which it will be run.
type: str
wait_for_process:
description:
- If set to C(True), module will wait for process to complete in the given virtual machine.
default: False
type: bool
version_added: 2.7
timeout:
description:
- Timeout in seconds.
- If set to positive integers, then C(wait_for_process) will honor this parameter and will exit after this timeout.
default: 3600
version_added: 2.7
type: int
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Run command inside a virtual machine
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: "{{ vm_name }}"
vm_username: root
vm_password: superSecret
vm_shell: /bin/echo
vm_shell_args: " $var >> myFile "
vm_shell_env:
- "PATH=/bin"
- "VAR=test"
vm_shell_cwd: "/tmp"
delegate_to: localhost
register: shell_command_output
- name: Run command inside a virtual machine with wait and timeout
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: NameOfVM
vm_username: root
vm_password: superSecret
vm_shell: /bin/sleep
vm_shell_args: 100
wait_for_process: True
timeout: 2000
delegate_to: localhost
register: shell_command_with_wait_timeout
- name: Change user password in the guest machine
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: "{{ vm_name }}"
vm_username: sample
vm_password: old_password
vm_shell: "/bin/echo"
vm_shell_args: "-e 'old_password\nnew_password\nnew_password' | passwd sample > /tmp/$$.txt 2>&1"
delegate_to: localhost
- name: Change hostname of guest machine
vmware_vm_shell:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
folder: "/{{datacenter}}/vm"
vm_id: "{{ vm_name }}"
vm_username: testUser
vm_password: SuperSecretPassword
vm_shell: "/usr/bin/hostnamectl"
vm_shell_args: "set-hostname new_hostname > /tmp/$$.txt 2>&1"
delegate_to: localhost
'''
RETURN = r'''
results:
description: metadata about the new process after completion with wait_for_process
returned: on success
type: dict
sample:
{
"cmd_line": "\"/bin/sleep\" 1",
"end_time": "2018-04-26T05:03:21+00:00",
"exit_code": 0,
"name": "sleep",
"owner": "dev1",
"start_time": "2018-04-26T05:03:19+00:00",
"uuid": "564db1e2-a3ff-3b0e-8b77-49c25570bb66",
}
'''
import time
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, find_cluster_by_name,
find_datacenter_by_name, find_vm_by_id,
vmware_argument_spec)
class VMwareShellManager(PyVmomi):
def __init__(self, module):
super(VMwareShellManager, self).__init__(module)
datacenter_name = module.params['datacenter']
cluster_name = module.params['cluster']
folder = module.params['folder']
self.pm = self.content.guestOperationsManager.processManager
self.timeout = self.params.get('timeout', 3600)
self.wait_for_pid = self.params.get('wait_for_process', False)
datacenter = None
if datacenter_name:
datacenter = find_datacenter_by_name(self.content, datacenter_name)
if not datacenter:
module.fail_json(changed=False, msg="Unable to find %(datacenter)s datacenter" % module.params)
cluster = None
if cluster_name:
cluster = find_cluster_by_name(self.content, cluster_name, datacenter)
if not cluster:
module.fail_json(changed=False, msg="Unable to find %(cluster)s cluster" % module.params)
if module.params['vm_id_type'] == 'inventory_path':
vm = find_vm_by_id(self.content,
vm_id=module.params['vm_id'],
vm_id_type="inventory_path",
folder=folder)
else:
vm = find_vm_by_id(self.content,
vm_id=module.params['vm_id'],
vm_id_type=module.params['vm_id_type'],
datacenter=datacenter,
cluster=cluster)
if not vm:
module.fail_json(msg='Unable to find virtual machine.')
tools_status = vm.guest.toolsStatus
if tools_status in ['toolsNotInstalled', 'toolsNotRunning']:
self.module.fail_json(msg="VMwareTools is not installed or is not running in the guest."
" VMware Tools are necessary to run this module.")
try:
self.execute_command(vm, module.params)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(changed=False, msg=to_native(runtime_fault.msg))
except vmodl.MethodFault as method_fault:
module.fail_json(changed=False, msg=to_native(method_fault.msg))
except Exception as e:
module.fail_json(changed=False, msg=to_native(e))
def execute_command(self, vm, params):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
vm_username = params['vm_username']
vm_password = params['vm_password']
program_path = params['vm_shell']
args = params['vm_shell_args']
env = params['vm_shell_env']
cwd = params['vm_shell_cwd']
credentials = vim.vm.guest.NamePasswordAuthentication(username=vm_username,
password=vm_password)
cmd_spec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args,
envVariables=env,
programPath=program_path,
workingDirectory=cwd)
res = self.pm.StartProgramInGuest(vm=vm, auth=credentials, spec=cmd_spec)
if self.wait_for_pid:
res_data = self.wait_for_process(vm, res, credentials)
results = dict(uuid=vm.summary.config.uuid,
owner=res_data.owner,
start_time=res_data.startTime.isoformat(),
end_time=res_data.endTime.isoformat(),
exit_code=res_data.exitCode,
name=res_data.name,
cmd_line=res_data.cmdLine)
if res_data.exitCode != 0:
results['msg'] = "Failed to execute command"
results['changed'] = False
results['failed'] = True
self.module.fail_json(**results)
else:
results['changed'] = True
results['failed'] = False
self.module.exit_json(**results)
else:
self.module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=res)
def process_exists_in_guest(self, vm, pid, creds):
res = self.pm.ListProcessesInGuest(vm, creds, pids=[pid])
if not res:
self.module.fail_json(
changed=False, msg='ListProcessesInGuest: None (unexpected)')
res = res[0]
if res.exitCode is None:
return True, None
else:
return False, res
def wait_for_process(self, vm, pid, creds):
start_time = time.time()
while True:
current_time = time.time()
process_status, res_data = self.process_exists_in_guest(vm, pid, creds)
if not process_status:
return res_data
elif current_time - start_time >= self.timeout:
self.module.fail_json(
msg="Timeout waiting for process to complete.",
vm=vm._moId,
pid=pid,
start_time=start_time,
current_time=current_time,
timeout=self.timeout)
else:
time.sleep(5)
def main():
    """Build the Ansible argument spec and hand control to VMwareShellManager.

    VMwareShellManager.__init__ runs the command and exits the module itself
    via exit_json/fail_json.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            datacenter=dict(type='str'),
            cluster=dict(type='str'),
            folder=dict(type='str'),
            vm_id=dict(type='str', required=True),
            vm_id_type=dict(default='vm_name', type='str',
                            choices=['inventory_path',
                                     'uuid',
                                     'instance_uuid',
                                     'dns_name',
                                     'vm_name']),
            vm_username=dict(type='str', required=True),
            vm_password=dict(type='str', no_log=True, required=True),
            vm_shell=dict(type='str', required=True),
            vm_shell_args=dict(default=" ", type='str'),
            vm_shell_env=dict(type='list'),
            vm_shell_cwd=dict(type='str'),
            wait_for_process=dict(type='bool', default=False),
            timeout=dict(type='int', default=3600),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        # folder is only meaningful (and required) for inventory_path lookups.
        required_if=[
            ['vm_id_type', 'inventory_path', ['folder']]
        ],
    )

    vm_shell_mgr = VMwareShellManager(module)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Stanford-Online/edx-platform | common/djangoapps/course_modes/migrations/0008_course_key_field_to_foreign_key.py | 13 | 2277 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField
# This should only be used for migrations that have been verified to produce a
# net-neutral sql change as generated by Django.
class NoSqlAlterField(migrations.AlterField):
    """An AlterField whose database operations are deliberate no-ops.

    Django's migration state still changes, but no SQL is executed, because
    the underlying column is already in the desired shape.
    """

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Intentionally emit no forwards SQL.
        return

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Intentionally emit no backwards SQL.
        return
class Migration(migrations.Migration):
    """Convert CourseMode.course_id (a CourseKeyField) into a `course`
    foreign key to CourseOverview while keeping the database column and its
    name unchanged throughout (no net SQL for the type change)."""

    dependencies = [
        ('course_overviews', '0013_courseoverview_language'),
        ('course_modes', '0007_coursemode_bulk_sku'),
    ]

    operations = [
        # Pin the name of the column in the database so that we can rename the field
        # in Django without generating any sql changes
        migrations.AlterField(
            model_name='coursemode',
            name='course_id',
            field=CourseKeyField(max_length=255, db_index=True, verbose_name="Course", db_column='course_id'),
        ),
        # Change the field name in Django to match our target field name
        migrations.RenameField(
            model_name='coursemode',
            old_name='course_id',
            new_name='course',
        ),
        # Change the type of the field in Django to be a foreign key
        # N.B. we don't need the db_column declaration because the default
        # for Django is to use ${field_name}_id (which is what we pinned the column
        # name to above).
        # We deliberately leave db_constraint set to False because the column
        # isn't currently constrained
        NoSqlAlterField(
            model_name='coursemode',
            name='course',
            field=models.ForeignKey(related_name='modes', db_constraint=False, default=None, to='course_overviews.CourseOverview', on_delete=models.CASCADE),
            preserve_default=False,
        ),
        # Change the Django unique-together constraint (this is Django-level only
        # since the database column constraint already exists).
        migrations.AlterUniqueTogether(
            name='coursemode',
            unique_together=set([('course', 'mode_slug', 'currency')]),
        ),
    ]
| agpl-3.0 |
MiLk/ansible | lib/ansible/module_utils/facts/hardware/base.py | 60 | 1746 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class Hardware:
    """Base class for platform-specific hardware fact gatherers.

    Subclasses set ``platform`` and override ``populate()`` to return a dict
    of collected hardware facts.
    """
    platform = 'Generic'

    # FIXME: remove load_on_init when we can
    def __init__(self, module, load_on_init=False):
        self.module = module

    def populate(self, collected_facts=None):
        # Base implementation gathers nothing; platform subclasses override.
        return {}
class HardwareCollector(BaseFactCollector):
    """Fact collector that delegates gathering to a Hardware subclass."""
    name = 'hardware'
    _fact_ids = set(['processor',
                     'processor_cores',
                     'processor_count',
                     # TODO: mounts isnt exactly hardware
                     'mounts',
                     'devices'])
    # Platform-specific collectors override this with their Hardware subclass.
    _fact_class = Hardware

    def collect(self, module=None, collected_facts=None):
        collected_facts = collected_facts or {}
        # Without a module we cannot run any platform probes.
        if not module:
            return {}

        # NOTE(review): collected_facts is passed through directly (no copy is
        # made here, despite what an older comment claimed), so a populate()
        # implementation that mutates it would affect the caller's dict.
        facts_obj = self._fact_class(module)

        facts_dict = facts_obj.populate(collected_facts=collected_facts)

        return facts_dict
| gpl-3.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/tools/tool_validator.py | 1 | 19484 | import filecmp
import logging
import os
import tempfile
from galaxy.tools import Tool
from galaxy.tools import parameters
from galaxy.tools.parameters import dynamic_options
from tool_shed.tools import data_table_manager
from tool_shed.util import basic_util
from tool_shed.util import hg_util
from tool_shed.util import shed_util_common as suc
from tool_shed.util import tool_util
from tool_shed.util import xml_util
log = logging.getLogger( __name__ )
class ToolValidator( object ):
    def __init__( self, app ):
        """Keep a reference to the app and a ToolDataTableManager bound to it."""
        self.app = app
        self.tdtm = data_table_manager.ToolDataTableManager( self.app )
def can_use_tool_config_disk_file( self, repository, repo, file_path, changeset_revision ):
"""
Determine if repository's tool config file on disk can be used. This method
is restricted to tool config files since, with the exception of tool config
files, multiple files with the same name will likely be in various directories
in the repository and we're comparing file names only (not relative paths).
"""
if not file_path or not os.path.exists( file_path ):
# The file no longer exists on disk, so it must have been deleted at some previous
# point in the change log.
return False
if changeset_revision == repository.tip( self.app ):
return True
file_name = basic_util.strip_path( file_path )
latest_version_of_file = \
self.get_latest_tool_config_revision_from_repository_manifest( repo, file_name, changeset_revision )
can_use_disk_file = filecmp.cmp( file_path, latest_version_of_file )
try:
os.unlink( latest_version_of_file )
except:
pass
return can_use_disk_file
    def check_tool_input_params( self, repo_dir, tool_config_name, tool, sample_files ):
        """
        Check all of the tool's input parameters, looking for any that are dynamically
        generated using external data files to make sure the files exist.

        Returns a list of ( file_name, error_message ) tuples describing what is
        missing; an empty list means all requirements were satisfied.
        """
        invalid_files_and_errors_tups = []
        correction_msg = ''
        for input_param in tool.input_params:
            # Only dynamic select parameters pull values from external files.
            if isinstance( input_param, parameters.basic.SelectToolParameter ) and input_param.is_dynamic:
                # If the tool refers to .loc files or requires an entry in the tool_data_table_conf.xml,
                # make sure all requirements exist.
                options = input_param.dynamic_options or input_param.options
                if options and isinstance( options, dynamic_options.DynamicOptions ):
                    if options.tool_data_table or options.missing_tool_data_table_name:
                        # Make sure the repository contains a tool_data_table_conf.xml.sample file.
                        sample_tool_data_table_conf = hg_util.get_config_from_disk( 'tool_data_table_conf.xml.sample', repo_dir )
                        if sample_tool_data_table_conf:
                            error, correction_msg = \
                                self.tdtm.handle_sample_tool_data_table_conf_file( sample_tool_data_table_conf,
                                                                                   persist=False )
                            if error:
                                invalid_files_and_errors_tups.append( ( 'tool_data_table_conf.xml.sample', correction_msg ) )
                            else:
                                options.missing_tool_data_table_name = None
                        else:
                            correction_msg = "This file requires an entry in the tool_data_table_conf.xml file. "
                            correction_msg += "Upload a file named tool_data_table_conf.xml.sample to the repository "
                            correction_msg += "that includes the required entry to correct this error.<br/>"
                            invalid_tup = ( tool_config_name, correction_msg )
                            # Avoid reporting the same file/message pair twice.
                            if invalid_tup not in invalid_files_and_errors_tups:
                                invalid_files_and_errors_tups.append( invalid_tup )
                    if options.index_file or options.missing_index_file:
                        # Make sure the repository contains the required xxx.loc.sample file.
                        index_file = options.index_file or options.missing_index_file
                        index_file_name = basic_util.strip_path( index_file )
                        sample_found = False
                        for sample_file in sample_files:
                            sample_file_name = basic_util.strip_path( sample_file )
                            if sample_file_name == '%s.sample' % index_file_name:
                                # Found the matching sample; clear the "missing" markers.
                                options.index_file = index_file_name
                                options.missing_index_file = None
                                if options.tool_data_table:
                                    options.tool_data_table.missing_index_file = None
                                sample_found = True
                                break
                        if not sample_found:
                            correction_msg = "This file refers to a file named <b>%s</b>. " % str( index_file_name )
                            correction_msg += "Upload a file named <b>%s.sample</b> to the repository to correct this error." % \
                                str( index_file_name )
                            invalid_files_and_errors_tups.append( ( tool_config_name, correction_msg ) )
        return invalid_files_and_errors_tups
def concat_messages( self, msg1, msg2 ):
if msg1:
if msg2:
message = '%s %s' % ( msg1, msg2 )
else:
message = msg1
elif msg2:
message = msg2
else:
message = ''
return message
def copy_disk_sample_files_to_dir( self, repo_files_dir, dest_path ):
"""
Copy all files currently on disk that end with the .sample extension to the
directory to which dest_path refers.
"""
sample_files = []
for root, dirs, files in os.walk( repo_files_dir ):
if root.find( '.hg' ) < 0:
for name in files:
if name.endswith( '.sample' ):
relative_path = os.path.join( root, name )
tool_util.copy_sample_file( self.app, relative_path, dest_path=dest_path )
sample_files.append( name )
return sample_files
def get_latest_tool_config_revision_from_repository_manifest( self, repo, filename, changeset_revision ):
"""
Get the latest revision of a tool config file named filename from the repository
manifest up to the value of changeset_revision. This method is restricted to tool_config
files rather than any file since it is likely that, with the exception of tool config
files, multiple files will have the same name in various directories within the repository.
"""
stripped_filename = basic_util.strip_path( filename )
for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):
manifest_ctx = repo.changectx( changeset )
for ctx_file in manifest_ctx.files():
ctx_file_name = basic_util.strip_path( ctx_file )
if ctx_file_name == stripped_filename:
try:
fctx = manifest_ctx[ ctx_file ]
except LookupError:
# The ctx_file may have been moved in the change set. For example,
# 'ncbi_blastp_wrapper.xml' was moved to 'tools/ncbi_blast_plus/ncbi_blastp_wrapper.xml',
# so keep looking for the file until we find the new location.
continue
fh = tempfile.NamedTemporaryFile( 'wb', prefix="tmp-toolshed-gltcrfrm" )
tmp_filename = fh.name
fh.close()
fh = open( tmp_filename, 'wb' )
fh.write( fctx.data() )
fh.close()
return tmp_filename
return None
    def get_list_of_copied_sample_files( self, repo, ctx, dir ):
        """
        Find all sample files (files in the repository with the special .sample extension)
        in the reversed repository manifest up to ctx. Copy each discovered file to dir and
        return the list of filenames. If a .sample file was added in a changeset and then
        deleted in a later changeset, it will be returned in the deleted_sample_files list.
        The caller will set the value of app.config.tool_data_path to dir in order to load
        the tools and generate metadata for them.
        """
        deleted_sample_files = []
        sample_files = []
        for changeset in hg_util.reversed_upper_bounded_changelog( repo, ctx ):
            changeset_ctx = repo.changectx( changeset )
            for ctx_file in changeset_ctx.files():
                ctx_file_name = basic_util.strip_path( ctx_file )
                # If we decide in the future that files deleted later in the changelog should
                # not be used, we can use the following if statement. if ctx_file_name.endswith( '.sample' )
                # and ctx_file_name not in sample_files and ctx_file_name not in deleted_sample_files:
                if ctx_file_name.endswith( '.sample' ) and ctx_file_name not in sample_files:
                    # get_file_context_from_ctx returns the sentinel string 'DELETED' when the
                    # file was removed in this changeset (see the membership test below).
                    fctx = hg_util.get_file_context_from_ctx( changeset_ctx, ctx_file )
                    if fctx in [ 'DELETED' ]:
                        # Since the possibly future used if statement above is commented out, the
                        # same file that was initially added will be discovered in an earlier changeset
                        # in the change log and fall through to the else block below. In other words,
                        # if a file named blast2go.loc.sample was added in change set 0 and then deleted
                        # in changeset 3, the deleted file in changeset 3 will be handled here, but the
                        # later discovered file in changeset 0 will be handled in the else block below.
                        # In this way, the file contents will always be found for future tools even though
                        # the file was deleted.
                        if ctx_file_name not in deleted_sample_files:
                            deleted_sample_files.append( ctx_file_name )
                    else:
                        sample_files.append( ctx_file_name )
                        # Copy the file into dir, stripping the .sample extension so tools can
                        # load it under its real name.
                        tmp_ctx_file_name = os.path.join( dir, ctx_file_name.replace( '.sample', '' ) )
                        fh = open( tmp_ctx_file_name, 'wb' )
                        fh.write( fctx.data() )
                        fh.close()
        return sample_files, deleted_sample_files
def handle_sample_files_and_load_tool_from_disk( self, repo_files_dir, repository_id, tool_config_filepath, work_dir ):
"""
Copy all sample files from disk to a temporary directory since the sample files may
be in multiple directories.
"""
message = ''
sample_files = self.copy_disk_sample_files_to_dir( repo_files_dir, work_dir )
if sample_files:
if 'tool_data_table_conf.xml.sample' in sample_files:
# Load entries into the tool_data_tables if the tool requires them.
tool_data_table_config = os.path.join( work_dir, 'tool_data_table_conf.xml' )
error, message = self.tdtm.handle_sample_tool_data_table_conf_file( tool_data_table_config,
persist=False )
tool, valid, message2 = self.load_tool_from_config( repository_id, tool_config_filepath )
message = self.concat_messages( message, message2 )
return tool, valid, message, sample_files
def handle_sample_files_and_load_tool_from_tmp_config( self, repo, repository_id, changeset_revision,
tool_config_filename, work_dir ):
tool = None
message = ''
ctx = hg_util.get_changectx_for_changeset( repo, changeset_revision )
# We're not currently doing anything with the returned list of deleted_sample_files here. It is
# intended to help handle sample files that are in the manifest, but have been deleted from disk.
sample_files, deleted_sample_files = self.get_list_of_copied_sample_files( repo, ctx, dir=work_dir )
if sample_files:
self.app.config.tool_data_path = work_dir
if 'tool_data_table_conf.xml.sample' in sample_files:
# Load entries into the tool_data_tables if the tool requires them.
tool_data_table_config = os.path.join( work_dir, 'tool_data_table_conf.xml' )
if tool_data_table_config:
error, message = self.tdtm.handle_sample_tool_data_table_conf_file( tool_data_table_config,
persist=False )
if error:
log.debug( message )
manifest_ctx, ctx_file = hg_util.get_ctx_file_path_from_manifest( tool_config_filename, repo, changeset_revision )
if manifest_ctx and ctx_file:
tool, message2 = self.load_tool_from_tmp_config( repo, repository_id, manifest_ctx, ctx_file, work_dir )
message = self.concat_messages( message, message2 )
return tool, message, sample_files
def load_tool_from_changeset_revision( self, repository_id, changeset_revision, tool_config_filename ):
"""
Return a loaded tool whose tool config file name (e.g., filtering.xml) is the value
of tool_config_filename. The value of changeset_revision is a valid (downloadable)
changeset revision. The tool config will be located in the repository manifest between
the received valid changeset revision and the first changeset revision in the repository,
searching backwards.
"""
original_tool_data_path = self.app.config.tool_data_path
repository = suc.get_repository_in_tool_shed( self.app, repository_id )
repo_files_dir = repository.repo_path( self.app )
repo = hg_util.get_repo_for_repository( self.app, repository=None, repo_path=repo_files_dir, create=False )
message = ''
tool = None
can_use_disk_file = False
tool_config_filepath = suc.get_absolute_path_to_file_in_repository( repo_files_dir, tool_config_filename )
work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-ltfcr" )
can_use_disk_file = self.can_use_tool_config_disk_file( repository,
repo,
tool_config_filepath,
changeset_revision )
if can_use_disk_file:
self.app.config.tool_data_path = work_dir
tool, valid, message, sample_files = \
self.handle_sample_files_and_load_tool_from_disk( repo_files_dir,
repository_id,
tool_config_filepath,
work_dir )
if tool is not None:
invalid_files_and_errors_tups = \
self.check_tool_input_params( repo_files_dir,
tool_config_filename,
tool,
sample_files )
if invalid_files_and_errors_tups:
message2 = tool_util.generate_message_for_invalid_tools( self.app,
invalid_files_and_errors_tups,
repository,
metadata_dict=None,
as_html=True,
displaying_invalid_tool=True )
message = self.concat_messages( message, message2 )
else:
tool, message, sample_files = \
self.handle_sample_files_and_load_tool_from_tmp_config( repo,
repository_id,
changeset_revision,
tool_config_filename,
work_dir )
basic_util.remove_dir( work_dir )
self.app.config.tool_data_path = original_tool_data_path
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
self.tdtm.reset_tool_data_tables()
return repository, tool, message
    def load_tool_from_config( self, repository_id, full_path ):
        """
        Load the tool defined by the config file at full_path via the application's
        toolbox.  Returns a ( tool, valid, error_message ) tuple; on failure tool is
        None, valid is False and error_message describes the problem.
        """
        try:
            tool = self.app.toolbox.load_tool( full_path, repository_id=repository_id )
            valid = True
            error_message = None
        # NOTE: this module uses Python 2 exception syntax ("except X, e").
        except KeyError, e:
            # KeyError indicates a missing tool_data_table entry (see message below).
            tool = None
            valid = False
            error_message = 'This file requires an entry for "%s" in the tool_data_table_conf.xml file. Upload a file ' % str( e )
            error_message += 'named tool_data_table_conf.xml.sample to the repository that includes the required entry to correct '
            error_message += 'this error. '
        except Exception, e:
            # Any other failure is reported verbatim to the caller.
            tool = None
            valid = False
            error_message = str( e )
        return tool, valid, error_message
def load_tool_from_tmp_config( self, repo, repository_id, ctx, ctx_file, work_dir ):
tool = None
message = ''
tmp_tool_config = hg_util.get_named_tmpfile_from_ctx( ctx, ctx_file, work_dir )
if tmp_tool_config:
tool_element, error_message = xml_util.parse_xml( tmp_tool_config )
if tool_element is None:
return tool, message
# Look for external files required by the tool config.
tmp_code_files = []
external_paths = Tool.get_externally_referenced_paths( tmp_tool_config )
for path in external_paths:
tmp_code_file_name = hg_util.copy_file_from_manifest( repo, ctx, path, work_dir )
if tmp_code_file_name:
tmp_code_files.append( tmp_code_file_name )
tool, valid, message = self.load_tool_from_config( repository_id, tmp_tool_config )
for tmp_code_file in tmp_code_files:
try:
os.unlink( tmp_code_file )
except:
pass
try:
os.unlink( tmp_tool_config )
except:
pass
return tool, message
| gpl-3.0 |
broferek/ansible | lib/ansible/plugins/callback/skippy.py | 83 | 1404 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: skippy
callback_type: stdout
requirements:
- set as main display callback
short_description: Ansible screen output that ignores skipped status
version_added: "2.0"
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.11'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
extends_documentation_fragment:
- default_callback
description:
- This callback does the same as the default except it does not output skipped host/task/item status
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    '''
    Stdout callback identical to the 'default' plugin except that it produces
    no output for skipped hosts, tasks or loop items (the two overrides below
    replace the default handlers with no-ops).
    '''
    # Plugin identification used by Ansible's callback loader.
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'skippy'
    def v2_runner_on_skipped(self, result):
        # Deliberately suppress output for skipped tasks.
        pass
    def v2_runner_item_on_skipped(self, result):
        # Deliberately suppress output for skipped loop items.
        pass
| gpl-3.0 |
JohnDoee/autotorrent | autotorrent/tests/test_db.py | 2 | 8809 | from __future__ import unicode_literals
import logging
import os
import shutil
import tempfile
from io import open
from logging.handlers import BufferingHandler
from unittest import TestCase
from ..db import Database
def create_file(temp_folder, path, size):
    """Create a file of *size* 'x' characters at temp_folder/<path components>,
    creating any missing parent directories along the way."""
    full_path = os.path.join(temp_folder, *path)
    parent = os.path.dirname(full_path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    with open(full_path, 'w') as handle:
        handle.write(u'x' * size)
class TestHandler(BufferingHandler):
    """Logging handler that accumulates raw log messages for later inspection."""
    def __init__(self):
        # Capacity of 0 is irrelevant because flushing is disabled below.
        super(TestHandler, self).__init__(0)
    def shouldFlush(self):
        """Never flush; records stay in self.buffer for the test's lifetime."""
        return False
    def emit(self, record):
        """Store only the unformatted message text of each record."""
        self.buffer.append(record.msg)
class TestDatabase(TestCase):
    """Integration tests for autotorrent's Database built over a scratch directory tree.
    setUp creates three source folders ('1', '2', '3') plus torrent fixtures and
    rebuilds the database; each test queries it in a different mode."""
    def setUp(self):
        # (path components, file size) pairs materialized under the scratch dir.
        self._temp_path = tempfile.mkdtemp()
        self._fs = [
            (['1', 'a'], 10),
            (['1', 'b'], 20),
            (['1', 'f', 'a'], 12),
            (['1', 'f', 'c'], 15),
            (['2', 'd'], 12),
            (['2', 'e'], 15),
        ]
        for p, size in self._fs:
            create_file(self._temp_path, p, size)
        os.makedirs(os.path.join(self._temp_path, '3'))
        # Copy release fixtures (and matching .torrent files) into folder '3'.
        dirname = os.path.join(os.path.dirname(__file__), 'testfiles')
        for f in ['Some-CD-Release', 'Some-Release', 'My-Bluray', 'My-DVD']:
            src = os.path.join(dirname, f)
            dst = os.path.join(self._temp_path, '3', f)
            shutil.copytree(src, dst)
            shutil.copy(src + '.torrent', dst + '.torrent')
        self.db = Database(os.path.join(self._temp_path, 'autotorrent.db'), [os.path.join(self._temp_path, '1'),
                                                                             os.path.join(self._temp_path, '2'),
                                                                             os.path.join(self._temp_path, '3')], [],
                           True, True, True, False, False, False)
        self.db.rebuild()
    def tearDown(self):
        if self._temp_path.startswith('/tmp'): # paranoid-mon, the best pokemon.
            shutil.rmtree(self._temp_path)
    def test_keyify_utf8(self):
        # keyify must accept byte-escaped UTF-8 input without raising.
        key = 'test \xef\xbc\x9a'
        self.db.keyify(0, key)
    def test_initial_build(self):
        # Every file created in setUp must be findable by (name, size).
        for p, size in self._fs:
            result = self.db.find_file_path(p[-1], size)
            p = os.path.join(self._temp_path, *p)
            self.assertEqual(result, p)
    def test_rebuild(self):
        # Replace one file and add another, then verify a full rebuild picks them up.
        fs = [
            (['2', 'e'], 16),
            (['2', 'f'], 15),
        ]
        for p, size in fs:
            create_file(self._temp_path, p, size)
        self._fs.pop()
        self._fs += fs
        self.db.rebuild()
        self.test_initial_build()
    def test_rebuild_specific_path(self):
        # Same as test_rebuild but restricting the rebuild to a given path list.
        fs = [
            (['2', 'e'], 16),
            (['2', 'f'], 15),
        ]
        for p, size in fs:
            create_file(self._temp_path, p, size)
        self._fs.pop()
        self._fs += fs
        self.db.rebuild([self._temp_path])
        self.test_initial_build()
    def test_ignore_file(self):
        # Files matching an ignore glob must disappear from lookups after rebuild.
        self.db.ignore_files = ['a*']
        self.db.rebuild()
        items = [self._fs.pop(0), self._fs.pop(1)]
        for p, size in items:
            self.assertEqual(self.db.find_file_path(p[-1], size), None)
        self.test_initial_build()
    def test_normalized(self):
        # Normalized-name mode maps 'b_c' onto the on-disk name 'B C'.
        fs = [
            (['2', 'B C'], 16),
        ]
        for p, size in fs:
            create_file(self._temp_path, p, size)
        self._fs += fs
        self.db.rebuild()
        self.test_initial_build()
        self.assertEqual(self.db.find_file_path('b_c', 16), os.path.join(self._temp_path, '2', 'B C'))
    def test_unicode(self):
        # Non-ASCII file names must survive a rebuild and be findable.
        fs = [
            (['2', '\xc6'], 16),
        ]
        for p, size in fs:
            create_file(self._temp_path, p, size)
        self._fs += fs
        self.db.rebuild()
        self.test_initial_build()
        self.assertEqual(self.db.find_file_path('\xc6', 16), os.path.join(self._temp_path, '2', '\xc6'))
    def test_unsplitable_release(self):
        # Scene-release (rar-set) files are looked up relative to the release root.
        self.assertEqual(self.db.find_unsplitable_file_path('Some-Release', ['some-rls.r01'], 12),
                         os.path.join(self._temp_path, '3', 'Some-Release', 'some-rls.r01'))
        self.assertEqual(self.db.find_unsplitable_file_path('Some-Release', ['some-rls.sfv'], 12),
                         os.path.join(self._temp_path, '3', 'Some-Release', 'some-rls.sfv'))
        self.assertEqual(self.db.find_unsplitable_file_path('Some-Release', ['sample', 'some-rls.mkv'], 12),
                         os.path.join(self._temp_path, '3', 'Some-Release', 'Sample', 'some-rls.mkv'))
    def test_unsplitable_release_multicd(self):
        # Lookups are case-insensitive on the intermediate folder names (cd2 -> CD2).
        self.assertEqual(self.db.find_unsplitable_file_path('Some-CD-Release', ['CD1', 'somestuff-1.r04'], 11),
                         os.path.join(self._temp_path, '3', 'Some-CD-Release', 'CD1', 'somestuff-1.r04'))
        self.assertEqual(self.db.find_unsplitable_file_path('Some-CD-Release', ['cd2', 'somestuff-2.r04'], 11),
                         os.path.join(self._temp_path, '3', 'Some-CD-Release', 'CD2', 'somestuff-2.r04'))
        self.assertEqual(self.db.find_unsplitable_file_path('Some-CD-Release', ['subs', 'somestuff-subs.rar'], 11),
                         os.path.join(self._temp_path, '3', 'Some-CD-Release', 'Subs', 'somestuff-subs.rar'))
        self.assertEqual(self.db.find_unsplitable_file_path('Some-CD-Release', ['Sample', 'some-rls.mkv'], 12),
                         os.path.join(self._temp_path, '3', 'Some-CD-Release', 'Sample', 'some-rls.mkv'))
    def test_exact_release(self):
        # 'd' queries match directories, 'f' queries match files; multiple hits allowed.
        self.assertEqual(self.db.find_exact_file_path('d', 'Some-Release'),
                         [os.path.join(self._temp_path, '3', 'Some-Release')])
        self.assertEqual(self.db.find_exact_file_path('d', 'Some-CD-Release'),
                         [os.path.join(self._temp_path, '3', 'Some-CD-Release')])
        self.assertEqual(self.db.find_exact_file_path('f', 'some-rls.mkv'), None)
        self.assertEqual(self.db.find_exact_file_path('f', 'a'),
                         [os.path.join(self._temp_path, '1', 'a'),
                          os.path.join(self._temp_path, '1', 'f', 'a')])
    def test_exact_bluray_release(self):
        self.assertEqual(self.db.find_exact_file_path('d', 'My-Bluray'), [os.path.join(self._temp_path, '3', 'My-Bluray')])
    def test_exact_dvd_release(self):
        self.assertEqual(self.db.find_exact_file_path('d', 'My-DVD'), [os.path.join(self._temp_path, '3', 'My-DVD')])
    def test_hash_rebuild(self):
        # Exercise the hash-based lookup modes (by name, by size, by varying size).
        self.db.hash_name_mode = True
        self.db.hash_size_mode = True
        self.db.hash_slow_mode = True
        self.db.hash_mode = True
        self.db.hash_mode_size_varying = 20.0
        self.db.rebuild()
        self.db.build_hash_size_table()
        self.assertEqual(self.db.find_hash_name('some-rls.mkv'), [])
        self.assertEqual(self.db.find_hash_name('a'),
                         [os.path.join(self._temp_path, '1', 'a'),
                          os.path.join(self._temp_path, '1', 'f', 'a')])
        self.assertEqual(self.db.find_hash_size(12),
                         [os.path.join(self._temp_path, '1', 'f', 'a'),
                          os.path.join(self._temp_path, '2', 'd')])
        self.assertEqual(self.db.find_hash_varying_size(12),
                         [os.path.join(self._temp_path, '1', 'f', 'a'),
                          os.path.join(self._temp_path, '2', 'd'),
                          os.path.join(self._temp_path, '1', 'a')])
        # With unsplitable mode off, files inside releases become individually hashable.
        self.db.unsplitable_mode = False
        self.db.rebuild()
        self.db.clear_hash_size_table()
        self.db.build_hash_size_table()
        self.assertEqual(sorted(self.db.find_hash_name('some-rls.mkv')),
                         sorted([os.path.join(self._temp_path, '3', 'Some-Release', 'Sample', 'some-rls.mkv'),
                                 os.path.join(self._temp_path, '3', 'Some-CD-Release', 'Sample', 'some-rls.mkv')]))
def test_inaccessible_file(self):
h = TestHandler()
l = logging.getLogger('autotorrent.db')
l.addHandler(h)
inaccessible_path = os.path.join(self._temp_path, '1', 'a')
os.chmod(inaccessible_path, 0000)
self.db.rebuild()
self.assertTrue("Path %r is not accessible, skipping" % inaccessible_path in h.buffer)
l.removeHandler(h)
h.close() | mit |
titienmiami/mmc.repository | plugin.video.SportsDevil/lib/utils/github/GitTag.py | 7 | 3445 | # -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import GitAuthor
import GitObject
class GitTag(GithubObject.GithubObject):
    """
    Represents an annotated git tag as returned by the GitHub API.
    Attributes are populated lazily: each property first completes the object
    from the API if its backing field has not been set yet.
    """
    @property
    def message(self):
        # Fetch the full object from the API on first access if needed.
        self._completeIfNotSet(self._message)
        return self._NoneIfNotSet(self._message)
    @property
    def object(self):
        self._completeIfNotSet(self._object)
        return self._NoneIfNotSet(self._object)
    @property
    def sha(self):
        self._completeIfNotSet(self._sha)
        return self._NoneIfNotSet(self._sha)
    @property
    def tag(self):
        self._completeIfNotSet(self._tag)
        return self._NoneIfNotSet(self._tag)
    @property
    def tagger(self):
        self._completeIfNotSet(self._tagger)
        return self._NoneIfNotSet(self._tagger)
    @property
    def url(self):
        self._completeIfNotSet(self._url)
        return self._NoneIfNotSet(self._url)
    def _initAttributes(self):
        # All fields start as the NotSet sentinel until _useAttributes fills them.
        self._message = GithubObject.NotSet
        self._object = GithubObject.NotSet
        self._sha = GithubObject.NotSet
        self._tag = GithubObject.NotSet
        self._tagger = GithubObject.NotSet
        self._url = GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Copy known keys from the raw API payload, wrapping nested objects.
        if "message" in attributes:  # pragma no branch
            assert attributes["message"] is None or isinstance(attributes["message"], (str, unicode)), attributes["message"]
            self._message = attributes["message"]
        if "object" in attributes:  # pragma no branch
            assert attributes["object"] is None or isinstance(attributes["object"], dict), attributes["object"]
            self._object = None if attributes["object"] is None else GitObject.GitObject(self._requester, attributes["object"], completed=False)
        if "sha" in attributes:  # pragma no branch
            assert attributes["sha"] is None or isinstance(attributes["sha"], (str, unicode)), attributes["sha"]
            self._sha = attributes["sha"]
        if "tag" in attributes:  # pragma no branch
            assert attributes["tag"] is None or isinstance(attributes["tag"], (str, unicode)), attributes["tag"]
            self._tag = attributes["tag"]
        if "tagger" in attributes:  # pragma no branch
            assert attributes["tagger"] is None or isinstance(attributes["tagger"], dict), attributes["tagger"]
            self._tagger = None if attributes["tagger"] is None else GitAuthor.GitAuthor(self._requester, attributes["tagger"], completed=False)
        if "url" in attributes:  # pragma no branch
            assert attributes["url"] is None or isinstance(attributes["url"], (str, unicode)), attributes["url"]
            self._url = attributes["url"]
| gpl-2.0 |
pet1330/zoidbot | interactive_face/src/zoidberg.py | 3 | 1720 | #!/usr/bin/python
import cv2
from cv_bridge import CvBridge
from cv_bridge import CvBridgeError
import numpy as np
import rosparam
import rospy
from sensor_msgs.msg import Image
import sys
import tf
import rospkg
class interactive_face:
    """Publishes an animated face to the robot's head display topic and blinks
    on a periodic timer."""
    def __init__(self):
        self.pub = rospy.Publisher('/robot/xdisplay', Image, queue_size=1)
        self.bridge = CvBridge()
        # Solid-color 600x1024 BGR background behind the face.
        self.cv_bg = np.zeros((600,1024,3), np.uint8)
        self.cv_bg[:,:] = (91,91,240)
        self.rospack = rospkg.RosPack()
        # Pre-load the four blink animation frames from the package assets.
        self.cv_blink = []
        for i in range(0,4):
            self.cv_blink.append(cv2.imread(self.rospack.get_path('interactive_face')+'/assets/face_blink' + str(i) + '.png',cv2.CV_LOAD_IMAGE_COLOR))
        self.cv_face = self.cv_blink[0]
        # Pixel offsets of the face within the background image.
        self.face_offsetX = 370
        self.face_offsetY = 150
        # Trigger a blink every 5 seconds.
        rospy.Timer(rospy.Duration(5), self.blink)
    def run(self):
        # Composite the current face frame onto the background at 8 Hz and publish.
        l_img = self.cv_bg
        self.rate = rospy.Rate(8)
        while not rospy.is_shutdown():
            s_img = self.cv_face
            l_img[self.face_offsetY:(s_img.shape[0]+self.face_offsetY),self.face_offsetX:(s_img.shape[1]+self.face_offsetX),:] = s_img
            self.pub.publish(self.bridge.cv2_to_imgmsg(l_img))
            self.rate.sleep()
    def blink(self,event):
        # Timer callback: step through the blink frames forward, pause, then backward.
        r = rospy.Rate(7)
        r2 = rospy.Rate(5)
        for i in range(0,4):
            self.cv_face = self.cv_blink[i]
            r.sleep()
        r2.sleep()
        for i in range(3,-1,-1):
            self.cv_face = self.cv_blink[i]
            r2.sleep()
def main():
    """Initialise the ROS node and run the face publisher until shutdown."""
    rospy.init_node('interactive_face', anonymous=True)
    face = interactive_face()
    face.run()
# Standard script entry point guard.
if __name__ == '__main__':
    main()
ArseniyK/Sunflower | application/operation.py | 1 | 48288 | import os
import gtk
import gobject
import fnmatch
from threading import Thread, Event
from gui.input_dialog import OverwriteFileDialog, OverwriteDirectoryDialog, OperationError, QuestionOperationError
from gui.operation_dialog import CopyDialog, MoveDialog, DeleteDialog, RenameDialog
from gui.error_list import ErrorList
from plugin_base.provider import Mode as FileMode, TrashError, Support as ProviderSupport
from plugin_base.monitor import MonitorSignals
from common import format_size
from queue import OperationQueue
# import constants
from gui.input_dialog import OverwriteOption
class BufferSize:
	"""Copy buffer sizes in bytes, chosen per provider locality."""
	LOCAL = 4096 * 1024
	REMOTE = 100 * 1024
class Option:
	"""Indexes into the options tuple passed to an Operation."""
	FILE_TYPE = 0
	DESTINATION = 1
	SET_OWNER = 2
	SET_MODE = 3
	SET_TIMESTAMP = 4
	SILENT = 5
	SILENT_MERGE = 6
	SILENT_OVERWRITE = 7
class Skip:
	"""Keys of the response cache used to remember 'skip all' decisions per error kind."""
	TRASH = 0
	REMOVE = 1
	WRITE = 2
	CREATE = 3
	MODE_SET = 4
	MOVE = 5
	RENAME = 6
	READ = 7
class OperationType:
	"""Identifiers for the kinds of file operations supported."""
	COPY = 0
	MOVE = 1
	DELETE = 2
	RENAME = 3
	LINK = 4
class Operation(Thread):
"""Parent class for all operation threads"""
	def __init__(self, application, source, destination=None, options=None, destination_path=None):
		"""Initialize the operation thread.
		application -- main application object
		source -- provider items are read from
		destination -- provider items are written to (None for delete-style operations)
		options -- tuple of operation options indexed by the Option constants, or None
		destination_path -- optional override for the destination's current path
		"""
		Thread.__init__(self, target=self)
		self._can_continue = Event()
		self._abort = Event()
		self._application = application
		self._source = source
		self._destination = destination
		self._options = options
		self._source_queue = None
		self._destination_queue = None
		# cached user decisions applied to all subsequent merge/overwrite conflicts
		self._merge_all = None
		self._overwrite_all = None
		self._response_cache = {}
		# operation queue
		self._operation_queue = None
		self._operation_queue_name = None
		# daemonize
		self.daemon = True
		# create operation dialog
		self._dialog = None
		self._create_dialog()
		self._dir_list = []
		self._file_list = []
		self._error_list = []
		self._selection_list = []
		# store initial paths
		self._source_path = self._source.get_path()
		if self._destination is not None:
			self._destination_path = destination_path or self._destination.get_path()
		# operation starts in the "may proceed" state
		self._can_continue.set()
	def _create_dialog(self):
		"""Create operation dialog; no-op here, overridden by concrete operation subclasses."""
		pass
	def _destroy_ui(self):
		"""Destroy user interface"""
		if self._dialog is not None:
			# GTK calls from this worker thread must hold the GDK lock.
			with gtk.gdk.lock:
				self._dialog.destroy()
def _get_free_space_input(self, needed, available):
"""Get user input when there is not enough space"""
size_format = self._application.options.get('size_format')
space_needed = format_size(needed, size_format)
space_available = format_size(available, size_format)
if self._options is not None and self._options[Option.SILENT]:
# silent option is enabled, we skip operation by default
self._error_list.append(_(
'Aborted. Not enough free space on target file system.\n'
'Needed: {0}\n'
'Available: {1}'
).format(space_needed, space_available))
should_continue = False
else:
# ask user what to do
with gtk.gdk.lock:
dialog = gtk.MessageDialog(
self._dialog.get_window(),
gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING,
gtk.BUTTONS_YES_NO,
_(
'Target file system does not have enough '
'free space for this operation to continue.\n\n'
'Needed: {0}\n'
'Available: {1}\n\n'
'Do you wish to continue?'
).format(space_needed, space_available)
)
dialog.set_default_response(gtk.RESPONSE_YES)
result = dialog.run()
dialog.destroy()
should_continue = result == gtk.RESPONSE_YES
return should_continue
def _get_merge_input(self, path):
"""Get merge confirmation"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, do what user specified
merge = self._options[Option.SILENT_MERGE]
self._merge_all = merge
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OverwriteDirectoryDialog(self._application, self._dialog.get_window())
title_element = os.path.basename(path)
message_element = os.path.basename(os.path.dirname(os.path.join(self._destination.get_path(), path)))
dialog.set_title_element(title_element)
dialog.set_message_element(message_element)
dialog.set_rename_value(title_element)
dialog.set_source(
self._source,
path,
relative_to=self._source_path
)
dialog.set_original(
self._destination,
path,
relative_to=self._destination_path
)
result = dialog.get_response()
merge = result[0] == gtk.RESPONSE_YES
if result[1][OverwriteOption.APPLY_TO_ALL]:
self._merge_all = merge
# in case user canceled operation
if result[0] == gtk.RESPONSE_CANCEL:
self.cancel()
return merge # return only response for current directory
def _get_overwrite_input(self, path):
"""Get overwrite confirmation"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, do what user specified
overwrite = self._options[Option.SILENT_OVERWRITE]
self._overwrite_all = overwrite
options = (False, '', True) # no rename, apply to all
else:
# we are not in silent mode, ask user what to do
with gtk.gdk.lock:
dialog = OverwriteFileDialog(self._application, self._dialog.get_window())
title_element = os.path.basename(path)
message_element = os.path.basename(os.path.dirname(os.path.join(self._destination.get_path(), path)))
dialog.set_title_element(title_element)
dialog.set_message_element(message_element)
dialog.set_rename_value(title_element)
dialog.set_source(
self._source,
path,
relative_to=self._source_path
)
dialog.set_original(
self._destination,
path,
relative_to=self._destination_path
)
result = dialog.get_response()
overwrite = result[0] == gtk.RESPONSE_YES
if result[1][OverwriteOption.APPLY_TO_ALL]:
self._overwrite_all = overwrite
# in case user canceled operation
if result[0] == gtk.RESPONSE_CANCEL:
self.cancel()
# pass options from input dialog
options = result[1]
return overwrite, options
def _get_write_error_input(self, error):
"""Get user response for write error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = OperationError.RESPONSE_SKIP
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
dialog.set_message(_(
'There is a problem writing data to destination '
'file. What would you like to do?'
))
dialog.set_error(str(error))
# get users response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.WRITE] = response
# abort operation if user requested
if response == OperationError.RESPONSE_CANCEL:
self.cancel()
return response
def _get_create_error_input(self, error, is_directory=False):
"""Get user response for create error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = OperationError.RESPONSE_SKIP
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
if not is_directory:
# set message for file
dialog.set_message(_(
'An error occurred while trying to create specified '
'file. What would you like to do?'
))
else:
# set message for directory
dialog.set_message(_(
'An error occurred while trying to create specified '
'directory. What would you like to do?'
))
dialog.set_error(str(error))
# get user response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.CREATE] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
def _get_mode_set_error_input(self, error):
"""Get user response for mode set error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = OperationError.RESPONSE_SKIP
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
dialog.set_message(_(
'Problem with setting path parameter for '
'specified path. What would you like to do?'
))
dialog.set_error(str(error))
# get user response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.MODE_SET] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
def _get_remove_error_input(self, error):
"""Get user response for remove error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = OperationError.RESPONSE_SKIP
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
dialog.set_message(_(
'There was a problem removing specified path. '
'What would you like to do?'
))
dialog.set_error(str(error))
# get users response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.REMOVE] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
def _get_trash_error_input(self, error):
"""Get user response for remove error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = gtk.RESPONSE_NO
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = QuestionOperationError(self._application)
dialog.set_message(_(
'There was a problem trashing specified path. '
'Would you like to try removing it instead?'
))
dialog.set_error(str(error))
# get users response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.TRASH] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
def _get_move_error_input(self, error):
"""Get user response for move error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = gtk.RESPONSE_NO
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
dialog.set_message(_(
'There was a problem moving specified path. '
'What would you like to do?'
))
dialog.set_error(str(error))
# get users response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.MOVE] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
def _get_rename_error_input(self, error):
"""Get user response for rename error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = gtk.RESPONSE_NO
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
dialog.set_message(_(
'There was a problem renaming specified path. '
'What would you like to do?'
))
dialog.set_error(str(error))
# get users response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.RENAME] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
def _get_read_error_input(self, error):
"""Get user response for directory listing error"""
if self._options is not None and self._options[Option.SILENT]:
# we are in silent mode, set response and log error
self._error_list.append(str(error))
response = gtk.RESPONSE_NO
else:
# we are not in silent mode, ask user
with gtk.gdk.lock:
dialog = OperationError(self._application)
dialog.set_message(_(
'There was a problem with reading specified directory. '
'What would you like to do?'
))
dialog.set_error(str(error))
# get users response
response = dialog.get_response()
# check if this response applies to future errors
if response == OperationError.RESPONSE_SKIP_ALL:
response = OperationError.RESPONSE_SKIP
self._response_cache[Skip.READ] = response
# abort operation if user requested
if response == gtk.RESPONSE_CANCEL:
self.cancel()
return response
	def set_selection(self, item_list):
		"""Add items from `item_list` to the set this operation will process.

		Note: items are appended to any existing selection rather than
		replacing it.
		"""
		self._selection_list.extend(item_list)
def set_operation_queue(self, queue_name):
"""Set operation to wait for queue."""
if queue_name is None:
return
# create new queue
self._operation_queue = Event()
self._operation_queue_name = queue_name
# schedule operation
OperationQueue.add(queue_name, self._operation_queue)
	def set_source_queue(self, queue):
		"""Set event queue used to emit monitor signals for the source
		side (fall-back monitor support)."""
		self._source_queue = queue
	def set_destination_queue(self, queue):
		"""Set event queue used to emit monitor signals for the
		destination side (fall-back monitor support)."""
		self._destination_queue = queue
	def pause(self):
		"""Pause current operation.

		Clears the continue event; worker loops block on
		`self._can_continue.wait()` until resume() is called.
		"""
		self._can_continue.clear()
	def resume(self):
		"""Resume current operation by releasing the pause lock."""
		self._can_continue.set()
def cancel(self):
"""Set an abort switch"""
self._abort.set()
# release lock set by the pause
if not self._can_continue.is_set():
self.resume()
class CopyOperation(Operation):
"""Operation thread used for copying files"""
def __init__(self, application, source, destination, options, destination_path=None):
Operation.__init__(self, application, source, destination, options, destination_path)
self._merge_all = None
self._overwrite_all = None
self._dir_list_create = []
self._total_count = 0
self._total_size = 0
self._buffer_size = 0
# cache settings
should_reserve = self._application.options.section('operations').get('reserve_size')
supported_by_provider = ProviderSupport.RESERVE_SIZE in self._destination.get_support()
self._reserve_size = should_reserve and supported_by_provider
# detect buffer size
if self._source.is_local and self._destination.is_local:
system_stat = self._destination.get_system_size(self._destination_path)
if system_stat.block_size:
self._buffer_size = system_stat.block_size * 1024
else:
self._buffer_size = BufferSize.LOCAL
else:
self._buffer_size = BufferSize.REMOTE
def _create_dialog(self):
"""Create progress dialog"""
self._dialog = CopyDialog(self._application, self)
def _update_status(self, status):
"""Set status and reset progress bars"""
self._dialog.set_status(status)
self._dialog.set_current_file("")
self._dialog.set_current_file_fraction(0)
def _get_lists(self):
"""Find all files for copying"""
gobject.idle_add(self._update_status, _('Searching for files...'))
# exclude files already selected with parent directory
for file_name in self._selection_list:
self._selection_list = filter(
lambda item: not item.startswith(file_name + os.path.sep),
self._selection_list
)
# traverse through the rest of the items
for item in self._selection_list:
if self._abort.is_set(): break # abort operation if requested
self._can_continue.wait() # pause lock
# update current file label
gobject.idle_add(self._dialog.set_current_file, item)
gobject.idle_add(self._dialog.pulse)
if os.path.sep in item:
relative_path, item = os.path.split(item)
source_path = os.path.join(self._source_path, relative_path)
else:
relative_path = None
source_path = self._source_path
if self._source.is_dir(item, relative_to=source_path):
# item is directory
can_procede = True
can_create = True
# check if directory exists on destination
if self._destination.exists(item, relative_to=self._destination_path):
can_create = False
if self._merge_all is not None:
can_procede = self._merge_all
else:
can_procede = self._get_merge_input(item)
# if user didn't skip directory, scan and update lists
if can_procede:
self._dir_list.append((item, relative_path))
if can_create: self._dir_list_create.append((item, relative_path))
self._scan_directory(item, relative_path)
elif fnmatch.fnmatch(item, self._options[Option.FILE_TYPE]):
# item is a file, get stats and update lists
item_stat = self._source.get_stat(item, relative_to=source_path)
gobject.idle_add(self._dialog.increment_total_size, item_stat.size)
gobject.idle_add(self._dialog.increment_total_count, 1)
self._total_count += 1
self._total_size += item_stat.size
self._file_list.append((item, relative_path))
def _set_mode(self, path, mode):
"""Set mode for specified path"""
if not self._options[Option.SET_MODE]: return
try:
# try to set mode for specified path
self._destination.set_mode(
path,
mode,
relative_to=self._destination_path
)
# push event to the queue
if self._destination_queue is not None:
event = (MonitorSignals.ATTRIBUTE_CHANGED, path, None)
self._destination_queue.put(event, False)
except StandardError as error:
# problem setting mode, ask user
if Skip.MODE_SET in self._response_cache:
response = self._response_cache[Skip.MODE_SET]
else:
response = self._get_mode_set_error_input(error)
# try to set mode again
if response == OperationError.RESPONSE_RETRY:
self._set_mode(path, mode)
return
def _set_owner(self, path, user_id, group_id):
"""Set owner and group for specified path"""
if not self._options[Option.SET_OWNER]: return
try:
# try set owner of specified path
self._destination.set_owner(
path,
user_id,
group_id,
relative_to=self._destination_path
)
# push event to the queue
if self._destination_queue is not None:
event = (MonitorSignals.ATTRIBUTE_CHANGED, path, None)
self._destination_queue.put(event, False)
except StandardError as error:
# problem with setting owner, ask user
if Skip.MODE_SET in self._response_cache:
response = self._response_cache[Skip.MODE_SET]
else:
response = self._get_mode_set_error_input(error)
# try to set owner again
if response == OperationError.RESPONSE_RETRY:
self._set_owner(path, user_id, group_id)
return
def _set_timestamp(self, path, access_time, modify_time, change_time):
"""Set timestamps for specified path"""
if not self._options[Option.SET_TIMESTAMP]: return
try:
# try setting timestamp
self._destination.set_timestamp(
path,
access_time,
modify_time,
change_time,
relative_to=self._destination_path
)
# push event to the queue
if self._destination_queue is not None:
event = (MonitorSignals.ATTRIBUTE_CHANGED, path, None)
self._destination_queue.put(event, False)
except StandardError as error:
# problem with setting owner, ask user
if Skip.MODE_SET in self._response_cache:
response = self._response_cache[Skip.MODE_SET]
else:
response = self._get_mode_set_error_input(error)
# try to set timestamp again
if response == OperationError.RESPONSE_RETRY:
self._set_timestamp(path, access_time, modify_time, change_time)
return
def _scan_directory(self, directory, relative_path=None):
"""Recursively scan directory and populate list"""
source_path = self._source_path if relative_path is None else os.path.join(self._source_path, relative_path)
try:
# try to get listing from directory
item_list = self._source.list_dir(directory, relative_to=source_path)
except StandardError as error:
# problem with reading specified directory, ask user
if Skip.READ in self._response_cache:
response = self._response_cache[Skip.READ]
else:
response = self._get_read_error_input(error)
# try to scan specified directory again
if response == OperationError.RESPONSE_RETRY:
self._scan_directory(directory, relative_path)
return
for item in item_list:
if self._abort.is_set(): break # abort operation if requested
self._can_continue.wait() # pause lock
gobject.idle_add(self._dialog.set_current_file, os.path.join(directory, item))
gobject.idle_add(self._dialog.pulse)
full_name = os.path.join(directory, item)
# item is a directory, scan it
if self._source.is_dir(full_name, relative_to=source_path):
can_procede = True
can_create = True
if self._destination.exists(full_name, relative_to=self._destination_path):
can_create = False
if self._merge_all is not None:
can_procede = self._merge_all
else:
can_procede = self._get_merge_input(full_name)
if can_procede:
# allow processing specified directory
self._dir_list.append((full_name, source_path))
if can_create: self._dir_list_create.append((full_name, source_path))
self._scan_directory(full_name, relative_path)
elif fnmatch.fnmatch(item, self._options[Option.FILE_TYPE]):
# item is a file, update global statistics
item_stat = self._source.get_stat(full_name, relative_to=source_path)
gobject.idle_add(self._dialog.increment_total_size, item_stat.size)
gobject.idle_add(self._dialog.increment_total_count, 1)
self._total_count += 1
self._total_size += item_stat.size
self._file_list.append((full_name, relative_path))
def _create_directory(self, directory, relative_path=None):
"""Create specified directory"""
source_path = self._source_path if relative_path is None else os.path.join(self._source_path, relative_path)
file_stat = self._source.get_stat(directory, relative_to=source_path)
mode = file_stat.mode if self._options[Option.SET_MODE] else 0755
try:
# try to create a directory
if self._destination.exists(directory, relative_to=self._destination_path):
if not self._destination.is_dir(directory, relative_to=self._destination_path):
raise StandardError(_(
'Unable to create directory because file with the same name '
'already exists in target directory.'
))
else:
# inode with specified name doesn't exist, create directory
self._destination.create_directory(
directory,
mode,
relative_to=self._destination_path
)
# push event to the queue
if self._destination_queue is not None:
event = (MonitorSignals.CREATED, directory, None)
self._destination_queue.put(event, False)
except StandardError as error:
# there was a problem creating directory
if Skip.CREATE in self._response_cache:
response = self._response_cache[Skip.CREATE]
else:
response = self._get_create_error_input(error, True)
# try to create directory again
if response == OperationError.RESPONSE_RETRY:
self._create_directory(directory)
# exit method
return
# set owner
self._set_owner(directory, file_stat.user_id, file_stat.group_id)
def _copy_file(self, file_name, relative_path=None):
"""Copy file content"""
can_procede = True
source_path = self._source_path if relative_path is None else os.path.join(self._source_path, relative_path)
dest_file = file_name
sh = None
dh = None
# check if destination file exists
if self._destination.exists(file_name, relative_to=self._destination_path):
if self._overwrite_all is not None:
can_procede = self._overwrite_all
else:
can_procede, options = self._get_overwrite_input(file_name)
# get new name if user specified
if options[OverwriteOption.RENAME]:
dest_file = os.path.join(
os.path.dirname(file_name),
options[OverwriteOption.NEW_NAME]
)
elif source_path == self._destination_path:
can_procede = False
# if user skipped this file return
if not can_procede:
self._file_list.pop(self._file_list.index((file_name, relative_path)))
# update total size
file_stat = self._source.get_stat(file_name, relative_to=source_path)
gobject.idle_add(self._dialog.increment_current_size, file_stat.size)
return
try:
# get file stats
destination_size = 0L
file_stat = self._source.get_stat(file_name, relative_to=source_path, extended=True)
# get file handles
sh = self._source.get_file_handle(file_name, FileMode.READ, relative_to=source_path)
dh = self._destination.get_file_handle(dest_file, FileMode.WRITE, relative_to=self._destination_path)
# report error properly
if sh is None:
raise StandardError('Unable to open source file in read mode.')
if dh is None:
raise StandardError('Unable to open destination file in write mode.')
# reserve file size
if self._reserve_size:
# try to reserve file size in advance,
# can be slow on memory cards and network
try:
dh.truncate(file_stat.size)
except:
dh.truncate()
else:
# just truncate file to 0 size in case source file is smaller
dh.truncate()
dh.seek(0)
# push event to the queue
if self._destination_queue is not None:
event = (MonitorSignals.CREATED, dest_file, None)
self._destination_queue.put(event, False)
except StandardError as error:
# close handles if they exist
if hasattr(sh, 'close'): sh.close()
if hasattr(dh, 'close'): sh.close()
if Skip.CREATE in self._response_cache:
response = self._response_cache[Skip.CREATE]
else:
response = self._get_create_error_input(error)
# try to create file again and copy contents
if response == OperationError.RESPONSE_RETRY:
self._copy_file(dest_file)
else:
# user didn't want to retry, remove file from list
self._file_list.pop(self._file_list.index((file_name, relative_path)))
# remove amount of copied bytes from total size
gobject.idle_add(self._dialog.increment_current_size, -destination_size)
# exit method
return
while True:
if self._abort.is_set(): break
self._can_continue.wait() # pause lock
data = sh.read(self._buffer_size)
if data:
try:
# try writing data to destination
dh.write(data)
except IOError as error:
# handle error
if Skip.WRITE in self._response_cache:
response = self._response_cache[Skip.WRITE]
else:
response = self._get_write_error_input(error)
# try to write data again
if response == OperationError.RESPONSE_RETRY:
gobject.idle_add(self._dialog.increment_current_size, -dh.tell())
if hasattr(sh, 'close'): sh.close()
if hasattr(dh, 'close'): sh.close()
self._copy_file(dest_file)
return
destination_size += len(data)
gobject.idle_add(self._dialog.increment_current_size, len(data))
if file_stat.size > 0: # ensure we don't end up with error on 0 size files
gobject.idle_add(
self._dialog.set_current_file_fraction,
destination_size / float(file_stat.size)
)
else:
gobject.idle_add(self._dialog.set_current_file_fraction, 1)
# push event to the queue
if self._destination_queue is not None:
event = (MonitorSignals.CHANGED, dest_file, None)
self._destination_queue.put(event, False)
else:
sh.close()
dh.close()
# set file parameters
self._set_mode(dest_file, file_stat.mode)
self._set_owner(dest_file, file_stat.user_id, file_stat.group_id)
self._set_timestamp(
dest_file,
file_stat.time_access,
file_stat.time_modify,
file_stat.time_change
)
break
def _create_directory_list(self):
"""Create all directories in list"""
gobject.idle_add(self._update_status, _('Creating directories...'))
for number, directory in enumerate(self._dir_list_create, 0):
if self._abort.is_set(): break # abort operation if requested
self._can_continue.wait() # pause lock
gobject.idle_add(self._dialog.set_current_file, directory[0])
self._create_directory(directory[0], directory[1]) # create directory
gobject.idle_add(
self._dialog.set_current_file_fraction,
float(number) / len(self._dir_list)
)
def _copy_file_list(self):
"""Copy list of files to destination path"""
# update status
gobject.idle_add(self._update_status, _('Copying files...'))
item_list = self._file_list[:]
# copy all the files in list
for file_name, source_path in item_list:
# abort operation if requested
if self._abort.is_set(): break
self._can_continue.wait() # pause lock
# copy file
gobject.idle_add(self._dialog.set_current_file, file_name)
self._copy_file(file_name, source_path)
gobject.idle_add(self._dialog.increment_current_count, 1)
def run(self):
"""Main thread method, this is where all the stuff is happening"""
# set dialog info
with gtk.gdk.lock:
self._dialog.set_source(self._source_path)
self._dialog.set_destination(self._destination_path)
# wait for operation queue if needed
if self._operation_queue is not None:
self._operation_queue.wait()
# get list of items to copy
self._get_lists()
# check for available free space
system_info = self._destination.get_system_size(self._destination_path)
if ProviderSupport.SYSTEM_SIZE in self._destination.get_support() \
and self._total_size > system_info.size_available:
should_continue = self._get_free_space_input(self._total_size, system_info.size_available)
# exit if user chooses to
if not should_continue:
self.cancel()
# clear selection on source directory
with gtk.gdk.lock:
parent = self._source.get_parent()
if self._source_path == parent.path:
parent.deselect_all()
# perform operation
self._create_directory_list()
self._copy_file_list()
# notify user if window is not focused
with gtk.gdk.lock:
if not self._dialog.is_active() and not self._application.is_active() and not self._abort.is_set():
notify_manager = self._application.notification_manager
title = _('Copy Operation')
message = ngettext(
'Copying of {0} item from "{1}" to "{2}" is completed!',
'Copying of {0} items from "{1}" to "{2}" is completed!',
len(self._file_list) + len(self._dir_list)
).format(
len(self._file_list) + len(self._dir_list),
os.path.basename(self._source_path),
os.path.basename(self._destination_path)
)
# queue notification
notify_manager.notify(title, message)
# show error list if needed
if len(self._error_list) > 0:
error_list = ErrorList(self._dialog)
error_list.set_operation_name(_('Copy Operation'))
error_list.set_source(self._source_path)
error_list.set_destination(self._destination_path)
error_list.set_errors(self._error_list)
error_list.show()
# destroy dialog
self._destroy_ui()
# start next operation
if self._operation_queue is not None:
OperationQueue.start_next(self._operation_queue_name)
class MoveOperation(CopyOperation):
	"""Operation thread used for moving files.

	When source and destination share a device the files are renamed in
	place; otherwise contents are copied and the sources removed.
	"""

	def _remove_path(self, path, item_list, relative_path=None):
		"""Remove specified path from the source provider."""
		source_path = self._source_path if relative_path is None else os.path.join(self._source_path, relative_path)

		try:
			# try removing specified path
			self._source.remove_path(path, relative_to=source_path)

			# push event to the queue
			if self._source_queue is not None:
				event = (MonitorSignals.DELETED, path, None)
				self._source_queue.put(event, False)

		except StandardError as error:
			# problem removing path, ask user what to do
			if Skip.REMOVE in self._response_cache:
				response = self._response_cache[Skip.REMOVE]
			else:
				response = self._get_remove_error_input(error)

			# try removing path again
			if response == OperationError.RESPONSE_RETRY:
				# FIX: keep relative path on retry, otherwise items in
				# subdirectories were resolved against the wrong base
				self._remove_path(path, item_list, relative_path)

			else:
				# user didn't want to retry, remove path from item_list
				item_list.pop(item_list.index(path))

	def _create_dialog(self):
		"""Create progress dialog"""
		self._dialog = MoveDialog(self._application, self)

	def _move_file(self, file_name, relative_path=None):
		"""Move specified file using provider rename method"""
		can_procede = True
		source_path = self._source_path if relative_path is None else os.path.join(self._source_path, relative_path)
		dest_file = file_name

		# check if destination file exists
		if self._destination.exists(file_name, relative_to=self._destination_path):
			if self._overwrite_all is not None:
				can_procede = self._overwrite_all
			else:
				can_procede, options = self._get_overwrite_input(file_name)

				# get new name if user specified
				if options[OverwriteOption.RENAME]:
					dest_file = os.path.join(
									os.path.dirname(file_name),
									options[OverwriteOption.NEW_NAME]
								)

		# if user skipped this file return
		if not can_procede:
			self._file_list.pop(self._file_list.index((file_name, relative_path)))
			return

		# move file
		try:
			self._source.move_path(
					file_name,
					os.path.join(self._destination_path, dest_file),
					relative_to=source_path
				)

			# push events to the queue
			if self._source_queue is not None:
				event = (MonitorSignals.DELETED, file_name, None)
				self._source_queue.put(event, False)

			if self._destination_queue is not None:
				event = (MonitorSignals.CREATED, dest_file, None)
				self._destination_queue.put(event, False)

		except StandardError as error:
			# problem with moving file, ask user what to do
			if Skip.MOVE in self._response_cache:
				response = self._response_cache[Skip.MOVE]
			else:
				response = self._get_move_error_input(error)

			# try moving file again
			if response == OperationError.RESPONSE_RETRY:
				# FIX: keep relative path on retry so the source file is
				# resolved against the correct directory
				self._move_file(dest_file, relative_path)

			else:
				# user didn't want to retry, remove file from list
				self._file_list.pop(self._file_list.index((file_name, relative_path)))

			# exit method
			return

	def _move_file_list(self):
		"""Move files from the list"""
		gobject.idle_add(self._update_status, _('Moving files...'))

		item_list = self._file_list[:]

		for file_name, source_path in item_list:
			if self._abort.is_set(): break  # abort operation if requested
			self._can_continue.wait()  # pause lock

			# move file
			gobject.idle_add(self._dialog.set_current_file, file_name)
			self._move_file(file_name, source_path)
			gobject.idle_add(self._dialog.increment_current_count, 1)

	def _delete_file_list(self):
		"""Remove files from source list"""
		gobject.idle_add(self._update_status, _('Deleting source files...'))

		item_list = self._file_list[:]

		for number, item in enumerate(item_list, 0):
			if self._abort.is_set(): break  # abort operation if requested
			self._can_continue.wait()  # pause lock

			# remove path
			gobject.idle_add(self._dialog.set_current_file, item[0])
			self._remove_path(item[0], self._file_list, item[1])

			# update current count
			gobject.idle_add(
					self._dialog.set_current_file_fraction,
					float(number) / len(item_list)
				)

		self._delete_directories()

	def _delete_directories(self):
		"""Remove empty directories after moving files"""
		gobject.idle_add(self._update_status, _('Deleting source directories...'))

		dir_list = self._dir_list[:]
		dir_list.reverse()  # remove deepest directories first

		for number, directory in enumerate(dir_list, 0):
			# NOTE(review): this source_path is absolute; _remove_path joins it
			# with self._source_path, which os.path.join collapses to the
			# absolute component — confirm intended
			source_path = self._source_path if directory[1] is None else os.path.join(self._source_path, directory[1])
			directory = directory[0]

			if self._abort.is_set(): break  # abort operation if requested
			self._can_continue.wait()  # pause lock

			if self._source.exists(directory, relative_to=source_path):
				gobject.idle_add(self._dialog.set_current_file, directory)

				# try to get a list of items inside of directory
				try:
					item_list = self._source.list_dir(directory, relative_to=source_path)
				except:
					item_list = None

				# remove directory if empty
				if item_list is not None and len(item_list) == 0:
					self._remove_path(directory, dir_list, relative_path=source_path)

				# update current count
				if len(dir_list) > 0:
					gobject.idle_add(
							self._dialog.set_current_file_fraction,
							float(number) / len(dir_list)
						)

				else:
					# prevent division by zero
					gobject.idle_add(self._dialog.set_current_file_fraction, 1)

	def _check_devices(self):
		"""Check if source and destination are on the same file system"""
		dev_source = self._source.get_stat(self._source.get_path(), extended=True).device
		dev_destination = self._destination.get_stat(self._destination.get_path(), extended=True).device

		return dev_source == dev_destination

	def run(self):
		"""Main thread method.

		We override this method from CopyOperation in order to provide
		a bit smarter move operation.
		"""
		# set dialog info
		with gtk.gdk.lock:
			self._dialog.set_source(self._source_path)
			self._dialog.set_destination(self._destination_path)

		# wait for operation queue if needed
		if self._operation_queue is not None:
			self._operation_queue.wait()

		# get list of items
		self._get_lists()

		# check for available free space
		system_info = self._destination.get_system_size(self._destination_path)

		if self._total_size > system_info.size_available and not self._check_devices():
			should_continue = self._get_free_space_input(self._total_size, system_info.size_available)

			# exit if user chooses to
			if not should_continue:
				self.cancel()

		# clear selection on source directory
		with gtk.gdk.lock:
			parent = self._source.get_parent()
			if self._source_path == parent.path:
				parent.deselect_all()

		# create directories
		self._create_directory_list()

		# copy/move files
		if self._check_devices():
			# both paths are on the same file system, move instead of copy
			self._move_file_list()
			self._delete_directories()

		else:
			# paths are located on different file systems, copy and remove
			self._copy_file_list()
			self._delete_file_list()

		# notify user if window is not focused
		with gtk.gdk.lock:
			if not self._dialog.is_active() and not self._application.is_active() and not self._abort.is_set():
				notify_manager = self._application.notification_manager

				title = _('Move Operation')
				message = ngettext(
						'Moving of {0} item from "{1}" to "{2}" is completed!',
						'Moving of {0} items from "{1}" to "{2}" is completed!',
						len(self._file_list) + len(self._dir_list)
					).format(
						len(self._file_list) + len(self._dir_list),
						os.path.basename(self._source_path),
						os.path.basename(self._destination_path)
					)

				# queue notification
				notify_manager.notify(title, message)

			# show error list if needed
			if len(self._error_list) > 0:
				error_list = ErrorList(self._dialog)
				error_list.set_operation_name(_('Move Operation'))
				error_list.set_source(self._source_path)
				error_list.set_destination(self._destination_path)
				error_list.set_errors(self._error_list)
				error_list.show()

		# destroy dialog
		self._destroy_ui()

		# start next operation
		if self._operation_queue is not None:
			OperationQueue.start_next(self._operation_queue_name)
class DeleteOperation(Operation):
	"""Operation thread used for deleting files.

	Items are moved to the trash when the provider supports it and the
	user has enabled it; otherwise (or when forced) they are removed
	permanently.
	"""

	def __init__(self, application, provider):
		Operation.__init__(self, application, provider)

		# allow users to force deleting items
		self._force_delete = False

	def _create_dialog(self):
		"""Create operation dialog"""
		self._dialog = DeleteDialog(self._application, self)

	def _remove_path(self, path):
		"""Remove specified path permanently."""
		try:
			# try removing specified path
			self._source.remove_path(path, relative_to=self._source_path)

			# push event to the queue
			if self._source_queue is not None:
				event = (MonitorSignals.DELETED, path, None)
				self._source_queue.put(event, False)

		except StandardError as error:
			# problem removing path, ask user what to do
			if Skip.REMOVE in self._response_cache:
				response = self._response_cache[Skip.REMOVE]
			else:
				response = self._get_remove_error_input(error)

			# try removing path again
			if response == OperationError.RESPONSE_RETRY:
				self._remove_path(path)

	def _trash_path(self, path):
		"""Move specified path to the trash."""
		try:
			# try trashing specified path
			self._source.trash_path(path, relative_to=self._source_path)

			# push event to the queue
			if self._source_queue is not None:
				event = (MonitorSignals.DELETED, path, None)
				self._source_queue.put(event, False)

		except TrashError as error:
			# problem trashing path, ask user what to do
			if Skip.TRASH in self._response_cache:
				response = self._response_cache[Skip.TRASH]
			else:
				response = self._get_trash_error_input(error)

			# NOTE(review): the dialog asks whether to remove the path instead
			# of trashing it, so an affirmative answer deliberately falls back
			# to a permanent remove — confirm RESPONSE_RETRY maps to "yes"
			if response == OperationError.RESPONSE_RETRY:
				self._remove_path(path)

	def set_force_delete(self, force):
		"""Set forced deletion instead of trashing files"""
		self._force_delete = force

	def run(self):
		"""Main thread method, this is where all the stuff is happening"""
		self._file_list = self._selection_list[:]  # use predefined selection list

		# wait for operation queue if needed
		if self._operation_queue is not None:
			self._operation_queue.wait()

		with gtk.gdk.lock:
			# clear selection on source directory
			parent = self._source.get_parent()
			if self._source_path == parent.path:
				parent.deselect_all()

		# select removal method
		trash_files = self._application.options.section('operations').get('trash_files')
		trash_available = ProviderSupport.TRASH in self._source.get_support()

		if self._force_delete:
			remove_method = self._remove_path

		else:
			# use trash when enabled and supported, permanent removal otherwise
			# (conditional expression instead of the old bool-indexed tuple,
			# which would raise IndexError on non-boolean truthy option values)
			remove_method = self._trash_path if trash_files and trash_available else self._remove_path

		# remove them
		for index, item in enumerate(self._file_list, 1):
			if self._abort.is_set(): break  # abort operation if requested
			self._can_continue.wait()  # pause lock

			gobject.idle_add(self._dialog.set_current_file, item)
			remove_method(item)

			# update current count
			if len(self._file_list) > 0:
				gobject.idle_add(
						self._dialog.set_current_file_fraction,
						float(index) / len(self._file_list)
					)

			else:
				# prevent division by zero
				gobject.idle_add(self._dialog.set_current_file_fraction, 1)

		# notify user if window is not focused
		with gtk.gdk.lock:
			if not self._dialog.is_active() and not self._application.is_active() and not self._abort.is_set():
				notify_manager = self._application.notification_manager

				title = _('Delete Operation')
				message = ngettext(
						'Removal of {0} item from "{1}" is completed!',
						'Removal of {0} items from "{1}" is completed!',
						len(self._file_list)
					).format(
						len(self._file_list),
						os.path.basename(self._source_path)
					)

				# queue notification
				notify_manager.notify(title, message)

		# destroy dialog
		self._destroy_ui()

		# start next operation
		if self._operation_queue is not None:
			OperationQueue.start_next(self._operation_queue_name)
class RenameOperation(Operation):
	"""Thread used for rename of large number of files."""

	def __init__(self, application, provider, path, file_list):
		Operation.__init__(self, application, provider)

		self._destination = provider
		self._destination_path = path
		self._source_path = path
		self._file_list = file_list

	def _create_dialog(self):
		"""Create operation dialog"""
		self._dialog = RenameDialog(self._application, self)

	def _rename_path(self, old_name, new_name, index):
		"""Rename specified path, handling overwrite conflicts and errors."""
		can_procede = True

		try:
			# check if specified path already exists
			if self._destination.exists(new_name, relative_to=self._source_path):
				can_procede, options = self._get_overwrite_input(new_name)

				# get new name if user specified
				if options[OverwriteOption.RENAME]:
					new_name = os.path.join(
									os.path.dirname(new_name),
									options[OverwriteOption.NEW_NAME]
								)

			if not can_procede:
				# user canceled overwrite, skip the file
				self._file_list.pop(index)
				return

			else:
				# rename path
				self._source.rename_path(old_name, new_name, relative_to=self._source_path)

				# push event to the queue
				if self._source_queue is not None:
					# FIX: use the DELETED signal, consistent with every other
					# monitor event emitted in this module
					delete_event = (MonitorSignals.DELETED, old_name, None)
					create_event = (MonitorSignals.CREATED, new_name, None)

					self._source_queue.put(delete_event, False)
					self._source_queue.put(create_event, False)

		except StandardError as error:
			# problem renaming path, ask user what to do
			if Skip.RENAME in self._response_cache:
				response = self._response_cache[Skip.RENAME]
			else:
				response = self._get_rename_error_input(error)

			# try renaming path again
			if response == OperationError.RESPONSE_RETRY:
				# FIX: retry must call the rename routine; the old code called
				# self._remove_path with a rename signature and would crash
				self._rename_path(old_name, new_name, index)

			else:
				# user didn't want to retry, remove path from list
				self._file_list.pop(index)

	def run(self):
		"""Main thread method, this is where all the stuff is happening"""
		# wait for operation queue if needed
		if self._operation_queue is not None:
			self._operation_queue.wait()

		for index, item in enumerate(self._file_list, 1):
			if self._abort.is_set(): break  # abort operation if requested
			self._can_continue.wait()  # pause lock

			gobject.idle_add(self._dialog.set_current_file, item[0])
			self._rename_path(item[0], item[1], index-1)

			# update current count
			if len(self._file_list) > 0:
				gobject.idle_add(
						self._dialog.set_current_file_fraction,
						float(index) / len(self._file_list)
					)

			else:
				# prevent division by zero
				gobject.idle_add(self._dialog.set_current_file_fraction, 1)

		# notify user if window is not focused
		with gtk.gdk.lock:
			if not self._dialog.is_active() and not self._application.is_active() and not self._abort.is_set():
				notify_manager = self._application.notification_manager

				title = _('Rename Operation')
				message = ngettext(
						'Rename of {0} item from "{1}" is completed!',
						'Rename of {0} items from "{1}" is completed!',
						len(self._file_list)
					).format(
						len(self._file_list),
						os.path.basename(self._source_path)
					)

				# queue notification
				notify_manager.notify(title, message)

		# destroy dialog
		self._destroy_ui()

		# start next operation
		if self._operation_queue is not None:
			OperationQueue.start_next(self._operation_queue_name)
| gpl-3.0 |
impulze/paludis | python/version_operator_TEST.py | 5 | 1657 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 sw=4 sts=4 et :
#
# Copyright (c) 2007 Piotr Jaroszyński
#
# This file is part of the Paludis package manager. Paludis is free software;
# you can redistribute it and/or modify it under the terms of the GNU General
# Public License version 2, as published by the Free Software Foundation.
#
# Paludis is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
#
from paludis import *
import unittest
class TestCase_VersionOperator(unittest.TestCase):
    """Tests for the paludis VersionOperator Python bindings."""

    def test_01_init(self):
        """VersionOperator can be built from a symbol string or an enum value."""
        VersionOperator("<")
        VersionOperator(VersionOperatorValue.LESS)

    def test_02_exceptions(self):
        """An invalid operator string raises BadVersionOperatorError."""
        self.assertRaises(BadVersionOperatorError, VersionOperator, "<>")

    def test_03_str(self):
        """str() round-trips the operator symbol."""
        self.assertEqual(">", str(VersionOperator(">")))
        self.assertEqual("<", str(VersionOperator(VersionOperatorValue.LESS)))

    def test_04_compare(self):
        """compare() applies the operator to two VersionSpec values."""
        # FIX: self.assert_ is a deprecated unittest alias (removed in
        # Python 3.12); use the canonical assertTrue instead.
        self.assertTrue(VersionOperator("<").compare(VersionSpec("1.0"), VersionSpec("2.0")))
        self.assertTrue(VersionOperator(">").compare(VersionSpec("3.0"), VersionSpec("2.0")))
        self.assertTrue(VersionOperator(VersionOperatorValue.STUPID_EQUAL_STAR).compare(
            VersionSpec("2.0.1-r1"), VersionSpec("2.0")))


if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
wummel/linkchecker | tests/test_fileutil.py | 9 | 1360 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2010-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test file utility functions.
"""
import unittest
import linkcheck.fileutil
file_existing = __file__  # a path that is guaranteed to exist
file_non_existing = "ZZZ.i_dont_exist"  # a path that should never exist


class TestFileutil(unittest.TestCase):
    """Tests for the linkcheck.fileutil helper functions."""

    def test_size(self):
        """get_size() is positive for real files and -1 for missing ones."""
        size = linkcheck.fileutil.get_size(file_existing)
        self.assertTrue(size > 0)
        self.assertEqual(linkcheck.fileutil.get_size(file_non_existing), -1)

    def test_mtime(self):
        """get_mtime() is positive for real files and 0 for missing ones."""
        mtime = linkcheck.fileutil.get_mtime(file_existing)
        self.assertTrue(mtime > 0)
        self.assertEqual(linkcheck.fileutil.get_mtime(file_non_existing), 0)
rjschof/gem5 | tests/long/se/60.bzip2/test.py | 56 | 1754 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
# Make the common config directory importable so the cpu2000 benchmark
# definitions can be loaded.
m5.util.addToPath('../configs/common')
from cpu2000 import bzip2_source

# Build the SPEC CPU2000 bzip2 (source input) workload with the reduced
# 'lgred' data set and attach it to CPU 0.
# NOTE(review): 'isa', 'opsys' and 'root' are injected by the gem5 test
# harness before this script runs — confirm against the runner.
workload = bzip2_source(isa, opsys, 'lgred')
root.system.cpu[0].workload = workload.makeLiveProcess()
| bsd-3-clause |
sujithshankar/anaconda | pyanaconda/constants.py | 1 | 6817 | #
# constants.py: anaconda constants
#
# Copyright (C) 2001 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Used for digits, ascii_letters, punctuation constants
import string  # pylint: disable=deprecated-module
from pyanaconda.i18n import N_

# Use -1 to indicate that the selinux configuration is unset
SELINUX_DEFAULT = -1

# where to look for 3rd party addons
ADDON_PATHS = ["/usr/share/anaconda/addons"]

from pykickstart.constants import AUTOPART_TYPE_LVM

# common string needs to be easy to change
from pyanaconda import product
productName = product.productName
productVersion = product.productVersion
productArch = product.productArch
bugzillaUrl = product.bugUrl
isFinal = product.isFinal

# for use in device names, eg: "fedora", "rhel"
# multi-word product names are abbreviated to their initials
shortProductName = productName.lower()  # pylint: disable=no-member
if productName.count(" "):  # pylint: disable=no-member
    shortProductName = ''.join(s[0] for s in shortProductName.split())

# DriverDisc Paths
DD_ALL = "/tmp/DD"
DD_FIRMWARE = "/tmp/DD/lib/firmware"
DD_RPMS = "/tmp/DD-*"

TRANSLATIONS_UPDATE_DIR = "/tmp/updates/po"

ANACONDA_CLEANUP = "anaconda-cleanup"
MOUNT_DIR = "/run/install"
DRACUT_REPODIR = "/run/install/repo"
DRACUT_ISODIR = "/run/install/source"
ISO_DIR = MOUNT_DIR + "/isodir"
IMAGE_DIR = MOUNT_DIR + "/image"
INSTALL_TREE = MOUNT_DIR + "/source"
BASE_REPO_NAME = "anaconda"

# NOTE: this should be LANG_TERRITORY.CODESET, e.g. en_US.UTF-8
DEFAULT_LANG = "en_US.UTF-8"

DEFAULT_VC_FONT = "eurlatgr"

DEFAULT_KEYBOARD = "us"

DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"

# VNC questions
USEVNC = N_("Start VNC")
USETEXT = N_("Use text mode")

# Runlevel files
RUNLEVELS = {3: 'multi-user.target', 5: 'graphical.target'}

# Network
NETWORK_CONNECTION_TIMEOUT = 45  # in seconds
NETWORK_CONNECTED_CHECK_INTERVAL = 0.1  # in seconds

# DBus
DEFAULT_DBUS_TIMEOUT = -1  # use default

# Thread names
THREAD_EXECUTE_STORAGE = "AnaExecuteStorageThread"
THREAD_STORAGE = "AnaStorageThread"
THREAD_STORAGE_WATCHER = "AnaStorageWatcher"
THREAD_CHECK_STORAGE = "AnaCheckStorageThread"
THREAD_CUSTOM_STORAGE_INIT = "AnaCustomStorageInit"
THREAD_WAIT_FOR_CONNECTING_NM = "AnaWaitForConnectingNMThread"
THREAD_PAYLOAD = "AnaPayloadThread"
THREAD_PAYLOAD_RESTART = "AnaPayloadRestartThread"
THREAD_INPUT_BASENAME = "AnaInputThread"
THREAD_SYNC_TIME_BASENAME = "AnaSyncTime"
THREAD_EXCEPTION_HANDLING_TEST = "AnaExceptionHandlingTest"
THREAD_LIVE_PROGRESS = "AnaLiveProgressThread"
THREAD_SOFTWARE_WATCHER = "AnaSoftwareWatcher"
THREAD_CHECK_SOFTWARE = "AnaCheckSoftwareThread"
THREAD_SOURCE_WATCHER = "AnaSourceWatcher"
THREAD_INSTALL = "AnaInstallThread"
THREAD_CONFIGURATION = "AnaConfigurationThread"
THREAD_FCOE = "AnaFCOEThread"
THREAD_ISCSI_DISCOVER = "AnaIscsiDiscoverThread"
THREAD_ISCSI_LOGIN = "AnaIscsiLoginThread"
THREAD_GEOLOCATION_REFRESH = "AnaGeolocationRefreshThread"
THREAD_DATE_TIME = "AnaDateTimeThread"
THREAD_TIME_INIT = "AnaTimeInitThread"
THREAD_DASDFMT = "AnaDasdfmtThread"
THREAD_KEYBOARD_INIT = "AnaKeyboardThread"
THREAD_ADD_LAYOUTS_INIT = "AnaAddLayoutsInitThread"

# Geolocation constants

# geolocation providers
# - values are used by the geoloc CLI/boot option
GEOLOC_PROVIDER_FEDORA_GEOIP = "provider_fedora_geoip"
GEOLOC_PROVIDER_HOSTIP = "provider_hostip"
GEOLOC_PROVIDER_GOOGLE_WIFI = "provider_google_wifi"
# geocoding provider
GEOLOC_GEOCODER_NOMINATIM = "geocoder_nominatim"
# default providers
GEOLOC_DEFAULT_PROVIDER = GEOLOC_PROVIDER_FEDORA_GEOIP
GEOLOC_DEFAULT_GEOCODER = GEOLOC_GEOCODER_NOMINATIM
# timeout (in seconds)
GEOLOC_TIMEOUT = 3

ANACONDA_ENVIRON = "anaconda"
FIRSTBOOT_ENVIRON = "firstboot"

# Tainted hardware
UNSUPPORTED_HW = 1 << 28

# Password validation
PASSWORD_MIN_LEN = 8
PASSWORD_EMPTY_ERROR = N_("The password is empty.")
PASSWORD_CONFIRM_ERROR_GUI = N_("The passwords do not match.")
PASSWORD_CONFIRM_ERROR_TUI = N_("The passwords you entered were different. Please try again.")
PASSWORD_WEAK = N_("The password you have provided is weak. %s")
PASSWORD_WEAK_WITH_ERROR = N_("The password you have provided is weak: %s.")
PASSWORD_WEAK_CONFIRM = N_("You have provided a weak password. Press Done again to use anyway.")
PASSWORD_WEAK_CONFIRM_WITH_ERROR = N_("You have provided a weak password: %s. Press Done again to use anyway.")
PASSWORD_ASCII = N_("The password you have provided contains non-ASCII characters. You may not be able to switch between keyboard layouts to login. Press Done to continue.")
PASSWORD_DONE_TWICE = N_("You will have to press Done twice to confirm it.")

PASSWORD_STRENGTH_DESC = [N_("Empty"), N_("Weak"), N_("Fair"), N_("Good"), N_("Strong")]

# the number of seconds we consider a noticeable freeze of the UI
NOTICEABLE_FREEZE = 0.1

# all ASCII characters
PW_ASCII_CHARS = string.digits + string.ascii_letters + string.punctuation + " "

# Recognizing a tarfile
# FIX: 'tar.gz' and 'tar.xz' previously lacked the leading dot, unlike every
# other entry; presumably these are matched with str.endswith, where the
# dotless form would also (wrongly) match names like 'footar.gz'.
TAR_SUFFIX = (".tar", ".tbz", ".tgz", ".txz", ".tar.bz2", ".tar.gz", ".tar.xz")

# screenshots
SCREENSHOTS_DIRECTORY = "/tmp/anaconda-screenshots"
SCREENSHOTS_TARGET_DIRECTORY = "/root/anaconda-screenshots"

# cmdline arguments that append instead of overwrite
CMDLINE_APPEND = ["modprobe.blacklist", "ifname"]

DEFAULT_AUTOPART_TYPE = AUTOPART_TYPE_LVM

# Default to these units when reading user input when no units given
SIZE_UNITS_DEFAULT = "MiB"

# Constants for reporting status to IPMI. These are from the IPMI spec v2 rev1.1, page 512.
IPMI_STARTED = 0x7  # installation started
IPMI_FINISHED = 0x8  # installation finished successfully
IPMI_ABORTED = 0x9  # installation finished unsuccessfully, due to some non-exn error
IPMI_FAILED = 0xA  # installation hit an exception

# for how long (in seconds) we try to wait for enough entropy for LUKS
# keep this a multiple of 60 (minutes)
MAX_ENTROPY_WAIT = 10 * 60

# X display number to use
X_DISPLAY_NUMBER = 1

# Payload status messages
PAYLOAD_STATUS_PROBING_STORAGE = N_("Probing storage...")
PAYLOAD_STATUS_PACKAGE_MD = N_("Downloading package metadata...")
PAYLOAD_STATUS_GROUP_MD = N_("Downloading group metadata...")

# Window title text
WINDOW_TITLE_TEXT = N_("Anaconda Installer")
mwclient/mwclient | mwclient/page.py | 1 | 20723 | import six
from six import text_type
import time
from mwclient.util import parse_timestamp
import mwclient.listing
import mwclient.errors
class Page(object):
    """A MediaWiki page, identified by title (str) or page id (int).

    Page metadata ('info' properties) is fetched from the API on
    construction unless a pre-fetched ``info`` dict is supplied.
    """

    def __init__(self, site, name, info=None, extra_properties=None):
        # Copy-construct when another Page instance is passed as `name`.
        if type(name) is type(self):
            self.__dict__.update(name.__dict__)
            return
        self.site = site
        self.name = name
        # Cache for text(): hash((section, expandtemplates)) -> wikitext.
        self._textcache = {}

        if not info:
            if extra_properties:
                # extra_properties maps prop-module name -> iterable of extra
                # query parameters to send alongside the 'info' request.
                prop = 'info|' + '|'.join(six.iterkeys(extra_properties))
                extra_props = []
                for extra_prop in six.itervalues(extra_properties):
                    extra_props.extend(extra_prop)
            else:
                prop = 'info'
                extra_props = ()

            # An int `name` is treated as a page id, anything else as a title.
            if type(name) is int:
                info = self.site.get('query', prop=prop, pageids=name,
                                     inprop='protection', *extra_props)
            else:
                info = self.site.get('query', prop=prop, titles=name,
                                     inprop='protection', *extra_props)
            info = six.next(six.itervalues(info['query']['pages']))
        self._info = info

        if 'invalid' in info:
            raise mwclient.errors.InvalidPageTitle(info.get('invalidreason'))

        self.namespace = info.get('ns', 0)
        self.name = info.get('title', u'')
        if self.namespace:
            self.page_title = self.strip_namespace(self.name)
        else:
            self.page_title = self.name

        # Base page: everything before the first '/' of a subpage title.
        self.base_title = self.page_title.split('/')[0]
        self.base_name = self.name.split('/')[0]

        self.touched = parse_timestamp(info.get('touched'))
        self.revision = info.get('lastrevid', 0)
        self.exists = 'missing' not in info
        self.length = info.get('length')
        # protection: {type: (level, expiry)}, e.g. {'edit': ('sysop', 'infinity')}
        self.protection = {
            i['type']: (i['level'], i['expiry'])
            for i in info.get('protection', ())
            if i
        }
        self.redirect = 'redirect' in info
        self.pageid = info.get('pageid', None)
        self.contentmodel = info.get('contentmodel', None)
        self.pagelanguage = info.get('pagelanguage', None)
        self.restrictiontypes = info.get('restrictiontypes', None)

        # Timestamps for edit-conflict detection; set by text() and _edit().
        self.last_rev_time = None
        self.edit_time = None
def redirects_to(self):
""" Get the redirect target page, or None if the page is not a redirect."""
info = self.site.get('query', prop='pageprops', titles=self.name, redirects='')
if 'redirects' in info['query']:
for page in info['query']['redirects']:
if page['from'] == self.name:
return Page(self.site, page['to'])
return None
else:
return None
def resolve_redirect(self):
""" Get the redirect target page, or the current page if its not a redirect."""
target_page = self.redirects_to()
if target_page is None:
return self
else:
return target_page
    def __repr__(self):
        # NOTE(review): under Python 3 `name.encode('utf-8')` yields bytes, so
        # %s renders it as b'...'; kept as-is for Python 2 compatibility —
        # confirm intended output on Python 3.
        return "<Page object '%s' for %s>" % (self.name.encode('utf-8'), self.site)

    def __unicode__(self):
        # Python 2 text representation (Python 3 ignores __unicode__).
        return self.name
@staticmethod
def strip_namespace(title):
if title[0] == ':':
title = title[1:]
return title[title.find(':') + 1:]
@staticmethod
def normalize_title(title):
# TODO: Make site dependent
title = title.strip()
if title[0] == ':':
title = title[1:]
title = title[0].upper() + title[1:]
title = title.replace(' ', '_')
return title
def can(self, action):
"""Check if the current user has the right to carry out some action
with the current page.
Example:
>>> page.can('edit')
True
"""
level = self.protection.get(action, (action,))[0]
if level == 'sysop':
level = 'editprotected'
return level in self.site.rights
    def get_token(self, type, force=False):
        """Fetch an API token of the given kind for this page.

        Args:
            type (str): Token kind, e.g. 'edit', 'move' or 'delete'.
                (Parameter name shadows the builtin but is kept for
                backwards compatibility with keyword callers.)
            force (bool): Bypass the site's token cache and fetch a fresh one.
        """
        return self.site.get_token(type, force, title=self.name)
    def text(self, section=None, expandtemplates=False, cache=True, slot='main'):
        """Get the current wikitext of the page, or of a specific section.

        If the page does not exist, an empty string is returned. By
        default, results will be cached and if you call text() again
        with the same section and expandtemplates the result will come
        from the cache. The cache is stored on the instance, so it
        lives as long as the instance does.

        Args:
            section (int): Section number, to only get text from a single section.
            expandtemplates (bool): Expand templates (default: `False`)
            cache (bool): Use in-memory caching (default: `True`)
            slot (str): Content slot to read (MediaWiki >= 1.32); default 'main'.
        """
        if not self.can('read'):
            raise mwclient.errors.InsufficientPermission(self)
        if not self.exists:
            return u''
        if section is not None:
            section = text_type(section)

        # Cache key covers the two parameters that change the returned text.
        key = hash((section, expandtemplates))
        if cache and key in self._textcache:
            return self._textcache[key]

        revs = self.revisions(prop='content|timestamp', limit=1, section=section,
                              slots=slot)
        try:
            rev = next(revs)
            # MediaWiki >= 1.32 returns content per slot; older versions inline it.
            if 'slots' in rev:
                text = rev['slots'][slot]['*']
            else:
                text = rev['*']
            self.last_rev_time = rev['timestamp']
        except StopIteration:
            text = u''
            self.last_rev_time = None
        if not expandtemplates:
            # Remember when we read the text, for edit-conflict detection.
            self.edit_time = time.gmtime()
        else:
            # The 'rvexpandtemplates' option was removed in MediaWiki 1.32, so we have to
            # make an extra API call, see https://github.com/mwclient/mwclient/issues/214
            text = self.site.expandtemplates(text)

        if cache:
            self._textcache[key] = text
        return text

    def save(self, *args, **kwargs):
        """Alias for edit, for maintaining backwards compatibility."""
        return self.edit(*args, **kwargs)
    def edit(self, text, summary=u'', minor=False, bot=True, section=None, **kwargs):
        """Update the text of a section or the whole page by performing an edit operation.
        """
        return self._edit(summary, minor, bot, section, text=text, **kwargs)

    def append(self, text, summary=u'', minor=False, bot=True, section=None,
               **kwargs):
        """Append text to a section or the whole page by performing an edit operation.
        """
        return self._edit(summary, minor, bot, section, appendtext=text, **kwargs)

    def prepend(self, text, summary=u'', minor=False, bot=True, section=None,
                **kwargs):
        """Prepend text to a section or the whole page by performing an edit operation.
        """
        return self._edit(summary, minor, bot, section, prependtext=text, **kwargs)

    def _edit(self, summary, minor, bot, section, **kwargs):
        """Shared implementation behind edit(), append() and prepend().

        Raises:
            mwclient.errors.AssertUserFailedError: login required but missing.
            mwclient.errors.UserBlocked: the current user is blocked.
            mwclient.errors.ProtectedPageError: no 'edit' right on this page.
            mwclient.errors.NoWriteApi: the site exposes no write API.
        """
        if not self.site.logged_in and self.site.force_login:
            raise mwclient.errors.AssertUserFailedError()
        if self.site.blocked:
            raise mwclient.errors.UserBlocked(self.site.blocked)
        if not self.can('edit'):
            raise mwclient.errors.ProtectedPageError(self)

        if not self.site.writeapi:
            raise mwclient.errors.NoWriteApi(self)

        data = {}
        if minor:
            data['minor'] = '1'
        if not minor:
            data['notminor'] = '1'
        # basetimestamp/starttimestamp let the server detect edit conflicts
        # against the revision we last read (see text()).
        if self.last_rev_time:
            data['basetimestamp'] = time.strftime('%Y%m%d%H%M%S', self.last_rev_time)
        if self.edit_time:
            data['starttimestamp'] = time.strftime('%Y%m%d%H%M%S', self.edit_time)
        if bot:
            data['bot'] = '1'
        if section is not None:
            data['section'] = section

        data.update(kwargs)

        if self.site.force_login:
            data['assert'] = 'user'

        def do_edit():
            result = self.site.post('edit', title=self.name, summary=summary,
                                    token=self.get_token('edit'),
                                    **data)
            if result['edit'].get('result').lower() == 'failure':
                raise mwclient.errors.EditError(self, result['edit'])
            return result

        try:
            result = do_edit()
        except mwclient.errors.APIError as e:
            if e.code == 'badtoken':
                # Retry, but only once to avoid an infinite loop
                self.get_token('edit', force=True)
                try:
                    result = do_edit()
                except mwclient.errors.APIError as e:
                    self.handle_edit_error(e, summary)
            else:
                self.handle_edit_error(e, summary)

        # 'newtimestamp' is not included if no change was made
        if 'newtimestamp' in result['edit'].keys():
            self.last_rev_time = parse_timestamp(result['edit'].get('newtimestamp'))

        # Workaround for https://phabricator.wikimedia.org/T211233
        for cookie in self.site.connection.cookies:
            if 'PostEditRevision' in cookie.name:
                self.site.connection.cookies.clear(cookie.domain, cookie.path,
                                                   cookie.name)

        # clear the page text cache
        self._textcache = {}
        return result['edit']
    def handle_edit_error(self, e, summary):
        """Re-raise a low-level APIError from an edit as a more specific
        mwclient exception.

        Args:
            e (mwclient.errors.APIError): error returned by the API.
            summary (str): the edit summary, included in EditError.
        """
        if e.code == 'editconflict':
            raise mwclient.errors.EditError(self, summary, e.info)
        elif e.code in {'protectedtitle', 'cantcreate', 'cantcreate-anon',
                        'noimageredirect-anon', 'noimageredirect', 'noedit-anon',
                        'noedit', 'protectedpage', 'cascadeprotected',
                        'customcssjsprotected',
                        'protectednamespace-interface', 'protectednamespace'}:
            raise mwclient.errors.ProtectedPageError(self, e.code, e.info)
        elif e.code == 'assertuserfailed':
            raise mwclient.errors.AssertUserFailedError()
        else:
            # Unknown code: propagate the original error unchanged.
            raise e

    def touch(self):
        """Perform a "null edit" on the page to update the wiki's cached data of it.

        This is useful in contrast to purge when needing to update stored data on a wiki,
        for example Semantic MediaWiki properties or Cargo table values, since purge
        only forces update of a page's displayed values and not its store.
        """
        if not self.exists:
            return
        # Appending the empty string triggers a no-op edit.
        self.append('')
    def move(self, new_title, reason='', move_talk=True, no_redirect=False):
        """Move (rename) page to new_title.

        If user account is an administrator, specify no_redirect as True to not
        leave a redirect.

        If user does not have permission to move page, an InsufficientPermission
        exception is raised.

        Args:
            new_title (str): Destination title.
            reason (str): Reason for the move, shown in the move log.
            move_talk (bool): Also move the associated talk page.
            no_redirect (bool): Suppress the redirect (admins only).
        """
        if not self.can('move'):
            raise mwclient.errors.InsufficientPermission(self)

        if not self.site.writeapi:
            raise mwclient.errors.NoWriteApi(self)

        data = {}
        if move_talk:
            data['movetalk'] = '1'
        if no_redirect:
            data['noredirect'] = '1'
        # 'from' is a Python keyword, hence the positional ('from', ...) tuple.
        result = self.site.post('move', ('from', self.name), to=new_title,
                                token=self.get_token('move'), reason=reason, **data)
        return result['move']

    def delete(self, reason='', watch=False, unwatch=False, oldimage=False):
        """Delete page.

        If user does not have permission to delete page, an InsufficientPermission
        exception is raised.

        Args:
            reason (str): Reason for the deletion, shown in the deletion log.
            watch (bool): Add the page to the watchlist.
            unwatch (bool): Remove the page from the watchlist.
            oldimage: For file pages, archive name of the single old revision
                to delete instead of the whole page.
        """
        if not self.can('delete'):
            raise mwclient.errors.InsufficientPermission(self)

        if not self.site.writeapi:
            raise mwclient.errors.NoWriteApi(self)

        data = {}
        if watch:
            data['watch'] = '1'
        if unwatch:
            data['unwatch'] = '1'
        if oldimage:
            data['oldimage'] = oldimage
        result = self.site.post('delete', title=self.name,
                                token=self.get_token('delete'),
                                reason=reason, **data)
        return result['delete']

    def purge(self):
        """Purge server-side cache of page. This will re-render templates and other
        dynamic content.
        """
        self.site.post('purge', titles=self.name)

    # def watch: requires 1.14

    # Properties
    def backlinks(self, namespace=None, filterredir='all', redirect=False,
                  limit=None, generator=True):
        """List pages that link to the current page, similar to Special:Whatlinkshere.

        API doc: https://www.mediawiki.org/wiki/API:Backlinks

        Args:
            namespace (int): Restrict results to the given namespace.
            filterredir (str): 'all' (default), 'redirects' or 'nonredirects'.
            redirect (bool): Also list pages linking through redirects.
            limit (int): Maximum number of pages per API request.
            generator (bool): Use a generator-backed listing (default: True).
        """
        prefix = mwclient.listing.List.get_prefix('bl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(
            prefix, namespace=namespace, filterredir=filterredir,
        ))
        if redirect:
            kwargs['%sredirect' % prefix] = '1'
        kwargs[prefix + 'title'] = self.name

        return mwclient.listing.List.get_list(generator)(
            self.site, 'backlinks', 'bl', limit=limit, return_values='title',
            **kwargs
        )

    def categories(self, generator=True, show=None):
        """List categories used on the current page.

        API doc: https://www.mediawiki.org/wiki/API:Categories

        Args:
            generator (bool): Return generator (Default: True)
            show (str): Set to 'hidden' to only return hidden categories
                or '!hidden' to only return non-hidden ones.

        Returns:
            mwclient.listings.PagePropertyGenerator
        """
        prefix = mwclient.listing.List.get_prefix('cl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(
            prefix, show=show
        ))

        if generator:
            return mwclient.listing.PagePropertyGenerator(
                self, 'categories', 'cl', **kwargs
            )
        else:
            # TODO: return sortkey if wanted
            return mwclient.listing.PageProperty(
                self, 'categories', 'cl', return_values='title', **kwargs
            )

    def embeddedin(self, namespace=None, filterredir='all', limit=None, generator=True):
        """List pages that transclude the current page.

        API doc: https://www.mediawiki.org/wiki/API:Embeddedin

        Args:
            namespace (int): Restricts search to a given namespace (Default: None)
            filterredir (str): How to filter redirects, either 'all' (default),
                'redirects' or 'nonredirects'.
            limit (int): Maximum amount of pages to return per request
            generator (bool): Return generator (Default: True)

        Returns:
            mwclient.listings.List: Page iterator
        """
        prefix = mwclient.listing.List.get_prefix('ei', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace,
                                                            filterredir=filterredir))
        kwargs[prefix + 'title'] = self.name

        return mwclient.listing.List.get_list(generator)(
            self.site, 'embeddedin', 'ei', limit=limit, return_values='title',
            **kwargs
        )

    def extlinks(self):
        """List external links from the current page.

        API doc: https://www.mediawiki.org/wiki/API:Extlinks

        Returns:
            mwclient.listing.PageProperty: iterator of URL strings.
        """
        return mwclient.listing.PageProperty(self, 'extlinks', 'el', return_values='*')

    def images(self, generator=True):
        """List files/images embedded in the current page.

        API doc: https://www.mediawiki.org/wiki/API:Images

        Args:
            generator (bool): Return generator (Default: True)
        """
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'images', '')
        else:
            return mwclient.listing.PageProperty(self, 'images', '',
                                                 return_values='title')

    def iwlinks(self):
        """List interwiki links from the current page.

        API doc: https://www.mediawiki.org/wiki/API:Iwlinks

        Returns:
            mwclient.listing.PageProperty: iterator of (prefix, title) pairs.
        """
        return mwclient.listing.PageProperty(self, 'iwlinks', 'iw',
                                             return_values=('prefix', '*'))

    def langlinks(self, **kwargs):
        """List interlanguage links from the current page.

        API doc: https://www.mediawiki.org/wiki/API:Langlinks

        Returns:
            mwclient.listing.PageProperty: iterator of (language, title) pairs.
        """
        return mwclient.listing.PageProperty(self, 'langlinks', 'll',
                                             return_values=('lang', '*'),
                                             **kwargs)

    def links(self, namespace=None, generator=True, redirects=False):
        """List links to other pages from the current page.

        API doc: https://www.mediawiki.org/wiki/API:Links

        Args:
            namespace (int): Restrict results to the given namespace.
            generator (bool): Return generator (Default: True)
            redirects (bool): Resolve redirects among the listed links.
        """
        prefix = mwclient.listing.List.get_prefix('pl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace))
        if redirects:
            kwargs['redirects'] = '1'
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'links', 'pl', **kwargs)
        else:
            return mwclient.listing.PageProperty(self, 'links', 'pl',
                                                 return_values='title', **kwargs)
    def revisions(self, startid=None, endid=None, start=None, end=None,
                  dir='older', user=None, excludeuser=None, limit=50,
                  prop='ids|timestamp|flags|comment|user',
                  expandtemplates=False, section=None,
                  diffto=None, slots=None, uselang=None):
        """List revisions of the current page.

        API doc: https://www.mediawiki.org/wiki/API:Revisions

        Args:
            startid (int): Revision ID to start listing from.
            endid (int): Revision ID to stop listing at.
            start (str): Timestamp to start listing from.
            end (str): Timestamp to end listing at.
            dir (str): Direction to list in: 'older' (default) or 'newer'.
            user (str): Only list revisions made by this user.
            excludeuser (str): Exclude revisions made by this user.
            limit (int): The maximum number of revisions to return per request.
            prop (str): Which properties to get for each revision,
                default: 'ids|timestamp|flags|comment|user'
            expandtemplates (bool): Expand templates in rvprop=content output
            section (int): Section number. If rvprop=content is set, only the contents
                of this section will be retrieved.
            diffto (str): Revision ID to diff each revision to. Use "prev", "next" and
                "cur" for the previous, next and current revision respectively.
            slots (str): The content slot (Mediawiki >= 1.32) to retrieve content from.
            uselang (str): Language to use for parsed edit comments and other localized
                messages.

        Returns:
            mwclient.listings.List: Revision iterator
        """
        kwargs = dict(mwclient.listing.List.generate_kwargs(
            'rv', startid=startid, endid=endid, start=start, end=end, user=user,
            excludeuser=excludeuser, diffto=diffto, slots=slots
        ))

        # 'rvslots' only exists from MediaWiki 1.32 on; drop it for older sites.
        if self.site.version[:2] < (1, 32) and 'rvslots' in kwargs:
            # https://github.com/mwclient/mwclient/issues/199
            del kwargs['rvslots']

        kwargs['rvdir'] = dir
        kwargs['rvprop'] = prop
        kwargs['uselang'] = uselang
        if expandtemplates:
            kwargs['rvexpandtemplates'] = '1'
        if section is not None:
            kwargs['rvsection'] = section

        return mwclient.listing.RevisionsIterator(self, 'revisions', 'rv', limit=limit,
                                                  **kwargs)

    def templates(self, namespace=None, generator=True):
        """List templates used on the current page.

        API doc: https://www.mediawiki.org/wiki/API:Templates

        Args:
            namespace (int): Restrict results to the given namespace.
            generator (bool): Return generator (Default: True)
        """
        prefix = mwclient.listing.List.get_prefix('tl', generator)
        kwargs = dict(mwclient.listing.List.generate_kwargs(prefix, namespace=namespace))
        if generator:
            return mwclient.listing.PagePropertyGenerator(self, 'templates', prefix,
                                                          **kwargs)
        else:
            return mwclient.listing.PageProperty(self, 'templates', prefix,
                                                 return_values='title', **kwargs)
| mit |
l-hedgehog/bedrock | bedrock/facebookapps/tests/test_utils.py | 25 | 5007 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import urllib
from django.conf import settings
from django.utils.translation import get_language
from mock import Mock, patch
from nose.tools import eq_, ok_
from bedrock.facebookapps import utils
from bedrock.facebookapps import tests
# Locale set used to fake settings.FACEBOOK_LOCALES in the tests below.
DUMMY_FACEBOOK_LOCALES = ['en-GB', 'en-US', 'en', 'es-ES', 'es-MX']


class TestUnwrapSignedRequest(tests.TestCase):
    """Tests for utils.unwrap_signed_request."""

    def setUp(self):
        # Request stub exposing only the REQUEST dict the helper reads.
        self.request = Mock(['REQUEST'])
        self.request.REQUEST = {}

    def test_empty_signed_request(self):
        """
        If signed_request isn't set, should return empty dict.
        """
        eq_(utils.unwrap_signed_request(self.request), {})

    def test_change_locale_to_hyphen(self):
        """
        Should convert Facebook's underscore locales to hyphen locales.
        """
        payload = tests.create_payload(locale='en_GB')
        signed_request = tests.create_signed_request(payload)
        self.request.REQUEST['signed_request'] = signed_request
        unwrapped_payload = utils.unwrap_signed_request(self.request)
        eq_(unwrapped_payload['user']['locale'], 'en-GB')

    def test_normal_unwrap(self):
        """
        Should unwrap and return the encoded dictionary.
        """
        payload = tests.create_payload(locale='en_GB')
        signed_request = tests.create_signed_request(payload)
        self.request.REQUEST['signed_request'] = signed_request
        # Use hyphen in payload's locale to match util's transformation
        payload['user']['locale'] = 'en-GB'
        eq_(utils.unwrap_signed_request(self.request), payload)
class TestAppDataQueryStringEncode(tests.TestCase):
    """Tests for utils.app_data_query_string_encode."""

    def test_app_data_query_string_encode(self):
        """Each key/value pair is encoded as app_data[key]=value."""
        payload = {
            'foo': 'bar!',
            'baz': 'fooz',
            'scene': 'some-scene',
        }
        encoded = utils.app_data_query_string_encode(payload)
        expected = 'app_data[foo]=bar!&app_data[baz]=fooz&app_data[scene]=some-scene'
        eq_(urllib.unquote(encoded), expected)
@patch.object(settings, 'FACEBOOK_LOCALES', DUMMY_FACEBOOK_LOCALES)
class TestGetBestLocale(tests.TestCase):
    """
    Locales should be compared in lowercase because get_best_locale can return
    lowercase from get_language or the expected lowercase language and upper
    case country, as taken directly from FACEBOOK_LOCALES.
    """
    def setUp(self):
        # One locale per fallback branch exercised by the tests below.
        self.tested_locales = ['en-GB', 'en-ZA', 'es-AR', 'fu-BR']

    def test_supported_locale(self):
        """
        Return the given locale if supported.
        """
        eq_(utils.get_best_locale('en-GB').lower(), 'en-gb')

    def test_locale_for_activated_language(self):
        """
        If the locale isn't supported, try to activate just the language code
        and return the resulting locale if supported.
        """
        eq_(utils.get_best_locale('en-ZA').lower(), 'en')

    def test_first_supported_locale_for_language(self):
        """
        If neither the given locale or the locale resulting from activating the
        language code are supported, iterate through the supported locales and
        return the first one that matches the language.
        """
        eq_(utils.get_best_locale('es-AR').lower(), 'es-es')

    def test_unsupported_locale(self):
        """
        Return the default en-US when locale isn't supported.
        """
        eq_(utils.get_best_locale('ar-LB').lower(), 'en-us')

    def test_always_returns_supported_locale(self):
        """
        Always return a supported locale.
        """
        supported_locales = [locale.lower()
                             for locale in settings.FACEBOOK_LOCALES]
        for locale in self.tested_locales:
            best_locale = utils.get_best_locale(locale).lower()
            ok_(best_locale in supported_locales, 'The locale {best} (returned'
                ' for {locale}) is not a supported locale {supported}.'
                .format(locale=locale, best=best_locale,
                        supported=supported_locales))

    def test_locale_remains_unchanged(self):
        """
        Always preserve the active locale.
        """
        lang = 'pt-BR'
        with self.activate(lang):
            for locale in self.tested_locales:
                utils.get_best_locale(locale)
            eq_(get_language().lower(), lang.lower())
class TestJsRedirect(tests.TestCase):

    def setUp(self):
        # Minimal request mock exposing only the attributes js_redirect reads.
        self.request = Mock(['locale', 'GET'])
        self.request.locale = 'en-US'
        self.request.GET = {}
        self.url = 'https://www.mozilla.org/'
        self.response = utils.js_redirect(self.url, self.request)

    def test_js_redirect(self):
        """
        Response should be HTML to be used by JavaScript redirect code.
        """
        # NOTE(review): assert_js_redirect is presumably provided by
        # tests.TestCase -- confirm against the test base class.
        self.assert_js_redirect(self.response, self.url)
| mpl-2.0 |
jeehyn/NewWorld_kernel_ef52 | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect build output into the directories the perf Makefile exports
    # via PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP (read into the
    # module-level build_lib / build_tmp names below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same out-of-tree build directory that build_ext
    # writes into (PYTHON_EXTBUILD_LIB).
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags for the extension, extended by whatever the environment sets.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Out-of-tree build locations exported by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources, skipping blank lines and '#' comments.
# BUG FIX: the original used the Python-2-only builtin file() and never
# closed the handle; open() inside 'with' works on Python 2.6+ and 3.
with open('util/python-ext-sources') as src_list:
    ext_sources = [f.strip() for f in src_list
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources=ext_sources,
                 include_dirs=['util/include'],
                 extra_compile_args=cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
LockScreen/Backend | venv/lib/python2.7/site-packages/boto/ec2/instanceinfo.py | 152 | 1893 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class InstanceInfo(object):
    """
    Represents an EC2 Instance status response from CloudWatch
    """

    def __init__(self, connection=None, id=None, state=None):
        """
        :ivar str id: The instance's EC2 ID.
        :ivar str state: Specifies the current status of the instance.
        """
        self.connection = connection
        self.id = id
        self.state = state

    def __repr__(self):
        return 'InstanceInfo:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to handle.
        return None

    def endElement(self, name, value, connection):
        # Map the XML element onto the matching attribute; unknown
        # elements become plain attributes on the instance.
        if name in ('instanceId', 'InstanceId'):
            self.id = value
        elif name == 'state':
            self.state = value
        else:
            setattr(self, name, value)
| mit |
Wakeupbuddy/pexpect | examples/chess.py | 18 | 4483 | #!/usr/bin/env python
'''This demonstrates controlling a screen oriented application (curses).
It starts two instances of gnuchess and then pits them against each other.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from __future__ import print_function
from __future__ import absolute_import
import pexpect
import ANSI
REGEX_MOVE = '(?:[a-z]|\x1b\[C)(?:[0-9]|\x1b\[C)(?:[a-z]|\x1b\[C)(?:[0-9]|\x1b\[C)'
REGEX_MOVE_PART = '(?:[0-9]|\x1b\[C)(?:[a-z]|\x1b\[C)(?:[0-9]|\x1b\[C)'
class Chess:
    """Drive a gnuchess child process (curses UI) through a pexpect spawn
    plus an ANSI terminal emulator that tracks the on-screen cursor."""

    def __init__(self, engine="/usr/local/bin/gnuchess -a -h 1"):
        self.child = pexpect.spawn(engine)
        self.term = ANSI.ANSI()

        self.child.expect('Chess')
        if self.child.after != 'Chess':
            raise IOError('incompatible chess program')
        # BUG FIX: the original fed self.before / self.after to the terminal,
        # but those attributes do not exist on Chess; the matched text lives
        # on the pexpect child.
        self.term.process_list(self.child.before)
        self.term.process_list(self.child.after)

        self.last_computer_move = ''

    def read_until_cursor(self, r, c):
        """Consume child output until the emulated cursor reaches (r, c)."""
        while 1:
            # BUG FIX: the original discarded the byte returned by read()
            # and fed the integer column argument 'c' to the emulator.
            k = self.child.read(1, 60)
            self.term.process(k)
            if self.term.cur_r == r and self.term.cur_c == c:
                return 1

    def do_first_move(self, move):
        """Send our very first move, once the engine prompts for it."""
        self.child.expect('Your move is')
        self.child.sendline(move)
        # BUG FIX: same nonexistent-attribute bug as in __init__.
        self.term.process_list(self.child.before)
        self.term.process_list(self.child.after)
        return move

    def do_move(self, move):
        """Send a subsequent move once the cursor is at the input position."""
        self.read_until_cursor(19, 60)
        self.child.sendline(move)
        return move

    def get_first_computer_move(self):
        """Return the engine's first move (matched by REGEX_MOVE)."""
        self.child.expect('My move is')
        self.child.expect(REGEX_MOVE)
        return self.child.after

    def get_computer_move(self):
        """Return the engine's move, patching up partially-redrawn output.

        The curses UI only redraws the characters that changed, so a move
        may arrive truncated; missing characters are recovered from
        last_computer_move.
        """
        print('Here')
        i = self.child.expect(['\[17;59H', '\[17;58H'])
        print(i)
        if i == 0:
            self.child.expect(REGEX_MOVE)
            if len(self.child.after) < 4:
                self.child.after = self.child.after + self.last_computer_move[3]
        if i == 1:
            self.child.expect(REGEX_MOVE_PART)
            self.child.after = self.last_computer_move[0] + self.child.after
        print('', self.child.after)
        self.last_computer_move = self.child.after
        return self.child.after

    def switch(self):
        """Tell the engine to switch sides."""
        self.child.sendline('switch')

    def set_depth(self, depth):
        """Set the engine's search depth."""
        self.child.sendline('depth')
        self.child.expect('depth=')
        self.child.sendline('%d' % depth)

    def quit(self):
        """Ask the engine to exit."""
        self.child.sendline('quit')
import sys

print('Starting...')

# First part: a single engine plays against a short scripted opening.
white = Chess()
white.child.echo = 1
white.child.expect('Your move is')
white.set_depth(2)
white.switch()

move_white = white.get_first_computer_move()
print('first move white:', move_white)

white.do_move('e7e5')
move_white = white.get_computer_move()
print('move white:', move_white)
white.do_move('f8c5')
move_white = white.get_computer_move()
print('move white:', move_white)
white.do_move('b8a6')
move_white = white.get_computer_move()
print('move white:', move_white)

# NOTE: the demo deliberately stops here; the engine-vs-engine loop below
# is unreachable dead code kept for reference.
sys.exit(1)

black = Chess()
white = Chess()
white.child.expect('Your move is')
white.switch()

move_white = white.get_first_computer_move()
print('first move white:', move_white)
black.do_first_move(move_white)
move_black = black.get_first_computer_move()
print('first move black:', move_black)
white.do_move(move_black)

done = 0
while not done:
    move_white = white.get_computer_move()
    print('move white:', move_white)
    black.do_move(move_white)
    move_black = black.get_computer_move()
    print('move black:', move_black)
    white.do_move(move_black)
    print('tail of loop')

# BUG FIX: the original called g.quit() but no name 'g' exists anywhere;
# shut down both engine processes instead.
white.quit()
black.quit()
| isc |
cliqz/socorro | socorro/external/es/index_creator.py | 10 | 3323 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import elasticsearch
import json
import os
from configman import Namespace, RequiredConfig
from configman.converters import class_converter
from socorro.external.es.super_search_fields import SuperSearchFields
DIRECTORY = os.path.dirname(os.path.abspath(__file__))
class IndexCreator(RequiredConfig):
    """The elasticsearch-py library is split into a handful of functional
    areas; this class is concerned with IndicesClient only.
    """
    required_config = Namespace()
    required_config.add_option(
        'elasticsearch_emails_index_settings',
        default='%s/mappings/socorro_emails_index_settings.json' % DIRECTORY,
        doc='the file containing the mapping of the indexes receiving '
            'email addresses for the automatic-emails cron job',
    )
    required_config.add_option(
        'elasticsearch_emails_index',
        default='socorro_emails',
        doc='the index that handles data about email addresses for '
            'the automatic-emails cron job',
    )
    required_config.elasticsearch = Namespace()
    required_config.elasticsearch.add_option(
        'elasticsearch_class',
        default='socorro.external.es.connection_context.ConnectionContext',
        from_string_converter=class_converter,
        reference_value_from='resource.elasticsearch',
    )

    def __init__(self, config):
        super(IndexCreator, self).__init__()
        self.config = config
        # Instantiate the configured connection-context class; it owns the
        # actual elasticsearch client.
        self.es_context = self.config.elasticsearch.elasticsearch_class(
            config=self.config.elasticsearch
        )

    def get_index_client(self):
        """Maintained for interoperability purposes elsewhere in the codebase.
        """
        return self.es_context.indices_client()

    def create_socorro_index(self, es_index):
        """Create an index that will receive crash reports. """
        # The index mapping is derived dynamically from SuperSearchFields.
        es_settings = SuperSearchFields(config=self.config).get_mapping()
        self.create_index(es_index, es_settings)

    def create_emails_index(self):
        """Create an index that will receive email addresses for the
        automatic-emails cron job. """
        es_index = self.config.elasticsearch_emails_index
        # NOTE(review): this file handle is never closed explicitly; it is
        # only reclaimed by garbage collection.
        settings_json = open(
            self.config.elasticsearch_emails_index_settings
        ).read()
        es_settings = json.loads(settings_json)
        self.create_index(es_index, es_settings)

    def create_index(self, es_index, es_settings):
        """Create an index in elasticsearch, with specified settings.

        If the index already exists or is created concurrently during the
        execution of this function, nothing will happen.
        """
        try:
            client = self.es_context.indices_client()
            client.create(
                index=es_index,
                body=es_settings,
            )
            self.config.logger.info(
                'Created new elasticsearch index: %s', es_index
            )
        # Python 2 except syntax (this module predates Python 3 support).
        except elasticsearch.exceptions.RequestError, e:
            # If this index already exists, swallow the error.
            if 'IndexAlreadyExistsException' not in str(e):
                raise
| mpl-2.0 |
dmittov/AlcoBot | bot.py | 1 | 2312 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import logging
import telegram
import cocktail
from time import sleep
from urllib2 import URLError
def main():
    """Read the bot token, then poll Telegram for updates forever."""
    logging.basicConfig(
        level=logging.DEBUG,
        filename='debug.log',
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Telegram Bot Authorization Token.
    with open('prod.token') as fh:
        # BUG FIX: readline() keeps the trailing newline, which would make
        # the token invalid when handed to telegram.Bot; strip it.
        TOKEN = fh.readline().strip()
    # SECURITY FIX: never write the secret token itself to the log file.
    logging.info('token loaded (%d characters)', len(TOKEN))
    bot = telegram.Bot(TOKEN)
    try:
        update_id = bot.getUpdates()[0].update_id
    except IndexError:
        # No pending updates yet.
        update_id = None
    while True:
        try:
            update_id = response(bot, update_id)
        except telegram.TelegramError as e:
            # These are network problems with Telegram.
            if e.message in ("Bad Gateway", "Timed out"):
                sleep(1)
            elif e.message == "Unauthorized":
                # The user has removed or blocked the bot.
                update_id += 1
            else:
                raise e
        except URLError:
            # Transient network failure; back off briefly and retry.
            sleep(1)
def response(bot, update_id):
    """Handle all pending updates and return the next update offset."""
    # Request updates after the last update_id.
    for update in bot.getUpdates(offset=update_id, timeout=10):
        update_id = update.update_id + 1
        try:
            reply = cocktail.coctail_msg(update.message.text)
        except Exception as err:
            reply = err.message
        if reply:
            # chat_id is required to reply to any message.
            bot.sendMessage(chat_id=update.message.chat_id, text=reply)
    return update_id
if __name__ == '__main__':
main()
| gpl-3.0 |
moijes12/oh-mainline | vendor/packages/Django/django/contrib/localflavor/cl/forms.py | 101 | 3238 | """
Chile specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
from .cl_regions import REGION_CHOICES
class CLRegionSelect(Select):
    """
    A Select widget that uses a list of Chilean Regions (Regiones)
    as its choices.
    """

    def __init__(self, attrs=None):
        # REGION_CHOICES (from cl_regions) already has the (value, label)
        # shape that Select expects.
        super(CLRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class CLRutField(RegexField):
    """
    Chilean "Rol Unico Tributario" (RUT) field. This is the Chilean national
    identification number.

    Samples for testing are available from
    https://palena.sii.cl/cvc/dte/ee_empresas_emisoras.html
    """
    default_error_messages = {
        'invalid': _('Enter a valid Chilean RUT.'),
        'strict': _('Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.'),
        'checksum': _('The Chilean RUT is not valid.'),
    }

    def __init__(self, *args, **kwargs):
        # In strict mode only the fully formatted XX.XXX.XXX-X shape passes
        # the regex; 'strict' itself is consumed here, not forwarded.
        if 'strict' in kwargs:
            del kwargs['strict']
            super(CLRutField, self).__init__(r'^(\d{1,2}\.)?\d{3}\.\d{3}-[\dkK]$',
                error_message=self.default_error_messages['strict'], *args, **kwargs)
        else:
            # In non-strict mode, accept RUTs that validate but do not exist in
            # the real world.
            super(CLRutField, self).__init__(r'^[\d\.]{1,11}-?[\dkK]$', *args, **kwargs)

    def clean(self, value):
        """
        Check and clean the Chilean RUT.
        """
        super(CLRutField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        rut, verificador = self._canonify(value)
        if self._algorithm(rut) == verificador:
            return self._format(rut, verificador)
        else:
            raise ValidationError(self.error_messages['checksum'])

    def _algorithm(self, rut):
        """
        Takes RUT in pure canonical form, calculates the verifier digit.
        """
        # Modulo-11 checksum: digits are weighted 2..7 cyclically from the
        # right; the lookup string maps remainder 11 and 0 both to '0',
        # and remainder 10 to 'K'.
        suma = 0
        multi = 2
        for r in rut[::-1]:
            suma += int(r) * multi
            multi += 1
            if multi == 8:
                multi = 2
        return '0123456789K0'[11 - suma % 11]

    def _canonify(self, rut):
        """
        Turns the RUT into one normalized format. Returns a (rut, verifier)
        tuple.
        """
        # Strip spaces, dots and dashes; the verifier is the last character.
        rut = smart_text(rut).replace(' ', '').replace('.', '').replace('-', '')
        return rut[:-1], rut[-1].upper()

    def _format(self, code, verifier=None):
        """
        Formats the RUT from canonical form to the common string representation.
        If verifier=None, then the last digit in 'code' is the verifier.
        """
        if verifier is None:
            verifier = code[-1]
            code = code[:-1]
        # Insert thousand-separator dots right-to-left until the remaining
        # head is at most three digits.
        while len(code) > 3 and '.' not in code[:3]:
            pos = code.find('.')
            if pos == -1:
                new_dot = -3
            else:
                new_dot = pos - 3
            code = code[:new_dot] + '.' + code[new_dot:]
        return '%s-%s' % (code, verifier)
| agpl-3.0 |
nullzero/wprobot | wp/ltime.py | 1 | 1990 | # -*- coding: utf-8 -*-
"""
Library to manage everything about date and time.
"""
__version__ = "1.0.2"
__author__ = "Sorawee Porncharoenwase"
import datetime
import time
def wrapMonth(m):
    """Convert a one-based month number (1-12) to zero-based (0-11).

    Out-of-range values wrap around, so 0 -> 11 and 13 -> 0.
    (The previous docstring incorrectly said "zero-based ... to zero-based".)
    """
    # A single modulo handles any amount of wrap-around, generalizing the
    # original one-step +/-12 adjustment while giving identical results on
    # the inputs that adjustment covered.
    return (m - 1) % 12
def weekdayThai(d):
    """Return the Thai name of day-of-week d (0 = Monday ... 6 = Sunday)."""
    # BUG/PORTABILITY FIX: the original subscripted map(...), which returns
    # an iterator on Python 3 and would raise TypeError; index a plain list
    # and prepend the u"วัน" prefix to the selected name only.
    names = [u"จันทร์", u"อังคาร", u"พุธ", u"พฤหัสบดี", u"ศุกร์",
             u"เสาร์", u"อาทิตย์"]
    return u"วัน" + names[d]
def monthEng(m):
    """Return the English name of the month for one-based month number m."""
    names = (u"January", u"February", u"March", u"April", u"May", u"June",
             u"July", u"August", u"September", u"October", u"November",
             u"December")
    return names[wrapMonth(m)]
def monthThai(m):
    """Return the Thai name of the month for one-based month number m."""
    names = (u"มกราคม", u"กุมภาพันธ์", u"มีนาคม", u"เมษายน", u"พฤษภาคม",
             u"มิถุนายน", u"กรกฎาคม", u"สิงหาคม", u"กันยายน", u"ตุลาคม",
             u"พฤศจิกายน", u"ธันวาคม")
    return names[wrapMonth(m)]
def monthThaiAbbr(m):
    """Return the abbreviated Thai month name for one-based month number m."""
    abbrs = (u"ม.ค.", u"ก.พ.", u"มี.ค.", u"เม.ย.", u"พ.ค.", u"มิ.ย.",
             u"ก.ค.", u"ส.ค.", u"ก.ย.", u"ต.ค.", u"พ.ย.", u"ธ.ค.")
    return abbrs[wrapMonth(m)]
def getNumDay(year, month):
    """Return the number of days in the given one-based month of year."""
    if month != 2:
        # Index 2 (February) is a placeholder; it is handled above.
        return [0, 31, 0, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
    # February: Gregorian leap-year rule (divisible by 4, except centuries
    # not divisible by 400).
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return 29 if is_leap else 28
# Short module-level aliases for frequently used stdlib callables.
td = datetime.timedelta
sleep = time.sleep
| mit |
h3llrais3r/SickRage | lib/sqlalchemy/schema.py | 75 | 1103 | # schema.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Compatiblity namespace for sqlalchemy.sql.schema and related.
"""
from .sql.base import (
SchemaVisitor
)
from .sql.schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
DefaultGenerator,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
SchemaItem,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
_get_table_key,
ColumnCollectionConstraint,
)
from .sql.naming import conv
from .sql.ddl import (
DDL,
CreateTable,
DropTable,
CreateSequence,
DropSequence,
CreateIndex,
DropIndex,
CreateSchema,
DropSchema,
_DropView,
CreateColumn,
AddConstraint,
DropConstraint,
DDLBase,
DDLElement,
_CreateDropBase,
_DDLCompiles
)
| gpl-3.0 |
ovilab/atomify-lammps | libs/lammps/tools/moltemplate/moltemplate/dump2data.py | 8 | 62572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dump2data.py
Extract dynamical degrees of freedom from a lammps DUMP file (from the stdin)
and construct a new DATA file (to the stdout).
A reference DATA file is needed (argument).
basic usage
./dump2data.py orig_file.data < dump.lammpstrj > new_file.data
(This extract last frame, uses "full" atom_style.)
options:
./dump2data.py [-t t -atomstyle style] orig.data < dump.lammpstrj > new.data
"""
# Authors: Andrew Jewett
# License: New BSD License
# Copyright (c) 2014
# All rights reserved.
g_program_name = 'dump2data.py'
g_date_str = '2017-7-27'
g_version_str = '0.53.0'
import sys
from collections import defaultdict
from operator import itemgetter, attrgetter
class InputError(Exception):
    """Exception carrying a preformatted, user-facing error message."""

    def __init__(self, err_msg):
        self.err_msg = err_msg

    def __str__(self):
        # The stored message is the complete printable representation.
        return self.err_msg
def ErrorLeader(infile, lineno):
    """Return a '"file", line N: ' prefix for error messages."""
    return '"{0}", line {1}: '.format(infile, lineno)
class MiscSettings(object):
    """Command-line options controlling frame selection and I/O format."""

    def __init__(self):
        # Frame/time selection.
        self.tstart = None
        self.tstop = None
        self.timestep_str = ''
        self.last_frame = False
        self.center_frame = False
        self.skip_interval = 1
        # Input/output formats.
        self.output_format = 'data'
        self.input_format = 'dump'
        self.multi = True
        # Optional coordinate rescaling factor.
        self.scale = None
class AtomStyleSettings(object):
    """Records which columns of a data file's "Atoms" section hold which
    fields (coordinates, direction vectors, ids)."""

    def __init__(self):
        self.column_names = []    # optional list of column names
        self.i_coords = []        # triplet of column indices storing x, y, z
        self.ii_vects = []        # list of index triplets for directional data
                                  # (e.g. dipole or ellipsoid orientations)
        self.i_atomid = None      # column index of the atom-ID
        self.i_atomtype = None    # column index of the atom type
        self.i_molid = None       # column index of the molecule-ID, if any
class DataSettings(AtomStyleSettings):
    """AtomStyleSettings plus the reference data file's name and contents."""

    def __init__(self):
        AtomStyleSettings.__init__(self)
        self.contents = ''   # raw text of the reference data file
        self.file_name = ''  # its path, for error messages
# Atom Styles in LAMMPS as of 2011-7-29
# Atom Styles in LAMMPS as of 2011-7-29: maps each atom_style name to the
# ordered column names of its "Atoms" section.
g_style_map = {'angle': ['atom-ID', 'molecule-ID', 'atom-type', 'x', 'y', 'z'],
               'atomic': ['atom-ID', 'atom-type', 'x', 'y', 'z'],
               'body': ['atom-ID', 'atom-type', 'bodyflag', 'mass', 'x', 'y', 'z'],
               'bond': ['atom-ID', 'molecule-ID', 'atom-type', 'x', 'y', 'z'],
               'charge': ['atom-ID', 'atom-type', 'q', 'x', 'y', 'z'],
               'dipole': ['atom-ID', 'atom-type', 'q', 'x', 'y', 'z', 'mux', 'muy', 'muz'],
               'dpd': ['atom-ID', 'atom-type', 'theta', 'x', 'y', 'z'],
               'electron': ['atom-ID', 'atom-type', 'q', 'spin', 'eradius', 'x', 'y', 'z'],
               'ellipsoid': ['atom-ID', 'atom-type', 'x', 'y', 'z', 'quatw', 'quati', 'quatj', 'quatk'],
               'full': ['atom-ID', 'molecule-ID', 'atom-type', 'q', 'x', 'y', 'z'],
               'line': ['atom-ID', 'molecule-ID', 'atom-type', 'lineflag', 'density', 'x', 'y', 'z'],
               'meso': ['atom-ID', 'atom-type', 'rho', 'e', 'cv', 'x', 'y', 'z'],
               'molecular': ['atom-ID', 'molecule-ID', 'atom-type', 'x', 'y', 'z'],
               'peri': ['atom-ID', 'atom-type', 'volume', 'density', 'x', 'y', 'z'],
               # BUG FIX: a missing comma after 'molecule-ID' concatenated it
               # with 'volume' into the bogus column name 'molecule-IDvolume'.
               'smd': ['atom-ID', 'atom-type', 'molecule-ID', 'volume', 'mass', 'kernel-radius', 'contact-radius', 'x', 'y', 'z'],
               'sphere': ['atom-ID', 'atom-type', 'diameter', 'density', 'x', 'y', 'z'],
               'template': ['atom-ID', 'molecule-ID', 'template-index', 'template-atom', 'atom-type', 'x', 'y', 'z'],
               'tri': ['atom-ID', 'molecule-ID', 'atom-type', 'triangleflag', 'density', 'x', 'y', 'z'],
               'wavepacket': ['atom-ID', 'atom-type', 'charge', 'spin', 'eradius', 'etag', 'cs_re', 'cs_im', 'x', 'y', 'z'],
               'hybrid': ['atom-ID', 'atom-type', 'x', 'y', 'z'],
               # The following styles were removed from LAMMPS as of 2012-3
               'colloid': ['atom-ID', 'atom-type', 'x', 'y', 'z'],
               'granular': ['atom-ID', 'atom-type', 'diameter', 'density', 'x', 'y', 'z']}
def AtomStyle2ColNames(atom_style_string):
    """Map an atom_style command string to its "Atoms" section column names.

    A multi-word unrecognized string is treated as an explicit list of
    column names; 'hybrid' merges the columns of its sub-styles.
    Raises InputError for empty or unrecognized styles.
    """
    atom_style_string = atom_style_string.strip()
    if not atom_style_string:
        raise InputError('Error(dump2data): Invalid atom_style\n'
                         ' (The atom_style command was followed by an empty string.)\n')

    args = atom_style_string.split()
    atom_style = args[0]
    hybrid_args = args[1:]

    if atom_style not in g_style_map:
        if len(args) >= 2:
            # Two or more unrecognized words: interpret them as the
            # individual column names themselves.
            return args
        raise InputError(
            'Error(dump2data): Unrecognized atom_style: \"' + atom_style + '\"\n')

    if atom_style != 'hybrid':
        return g_style_map[atom_style]

    # 'hybrid': start from the base columns and append each sub-style's
    # columns, skipping duplicates.
    if not hybrid_args:
        raise InputError(
            'Error(dump2data): atom_style hybrid must be followed by a sub_style.\n')
    column_names = ['atom-ID', 'atom-type', 'x', 'y', 'z']
    for sub_style in hybrid_args:
        if sub_style not in g_style_map:
            raise InputError(
                'Error(dump2data): Unrecognized atom_style: \"' + sub_style + '\"\n')
        for cname in g_style_map[sub_style]:
            if cname not in column_names:
                column_names.append(cname)
    return column_names
def ColNames2AidAtypeMolid(column_names):
    """Locate the atom-ID, atom-type and molecule-ID columns.

    Returns a tuple (i_atomid, i_atomtype, i_molid) of column indices.
    i_molid may be None (some atom styles have no molecule-ID); a missing
    atom-ID or atom-type raises InputError.  Each elif chain establishes
    a fixed preference order among the aliases the LAMMPS manual uses.
    """
    # Because of the diversity of ways that these
    # numbers are referred to in the LAMMPS documentation,
    # we have to be flexible and allow the user to refer
    # to these quantities in a variety of ways.
    # Hopefully this covers everything:

    i_atomid = None
    if 'atom-ID' in column_names:
        i_atomid = column_names.index('atom-ID')
    elif 'atom−ID' in column_names:  # (− is the character used in the manual)
        i_atomid = column_names.index('atom−ID')
    elif 'atomID' in column_names:
        i_atomid = column_names.index('atomID')
    elif 'atomid' in column_names:
        i_atomid = column_names.index('atomid')
    elif 'id' in column_names:
        i_atomid = column_names.index('id')
    elif 'atom' in column_names:
        i_atomid = column_names.index('atom')
    elif '$atom' in column_names:
        i_atomid = column_names.index('$atom')
    else:
        raise InputError(
            'Error(dump2data): List of column names lacks an \"atom-ID\"\n')

    i_atomtype = None
    if 'atom-type' in column_names:
        i_atomtype = column_names.index('atom-type')
    elif 'atom−type' in column_names:  # (− hyphen character used in manual)
        i_atomtype = column_names.index('atom−type')
    elif 'atomtype' in column_names:
        i_atomtype = column_names.index('atomtype')
    elif 'type' in column_names:
        i_atomtype = column_names.index('type')
    elif '@atom' in column_names:
        i_atomtype = column_names.index('@atom')
    else:
        raise InputError(
            'Error(dump2data): List of column names lacks an \"atom-type\"\n')

    i_molid = None
    if 'molecule-ID' in column_names:
        i_molid = column_names.index('molecule-ID')
    elif 'molecule−ID' in column_names:  # (− hyphen character used in manual)
        i_molid = column_names.index('molecule−ID')
    elif 'moleculeID' in column_names:
        i_molid = column_names.index('moleculeID')
    elif 'moleculeid' in column_names:
        i_molid = column_names.index('moleculeid')
    elif 'molecule' in column_names:
        i_molid = column_names.index('molecule')
    elif 'molID' in column_names:
        i_molid = column_names.index('molID')
    elif 'molid' in column_names:
        i_molid = column_names.index('molid')
    elif 'mol' in column_names:
        i_molid = column_names.index('mol')
    elif '$mol' in column_names:
        i_molid = column_names.index('$mol')
    else:
        pass  # some atom_types do not have a valid molecule-ID

    return i_atomid, i_atomtype, i_molid
def ColNames2Coords(column_names):
    """ Which of the columns correspond to coordinates
        which must be transformed using rigid-body
        (affine: rotation + translation) transformations?
        This function outputs a list of lists of triplets of integers.
    """
    def _col(name):
        # Index of the column, or None when it is absent.
        return column_names.index(name) if name in column_names else None

    i_x = _col('x')
    i_y = _col('y')
    i_z = _col('z')

    # Either all three coordinates are present, or none of them.
    present = [i is not None for i in (i_x, i_y, i_z)]
    if any(present) and not all(present):
        raise InputError(
            'Error(dump2data): columns must include \"x\", \"y\", and \"z\".\n')

    return [[i_x, i_y, i_z]]
def ColNames2Vects(column_names):
    """ Which of the columns correspond to coordinates
        which must be transformed using rotations?
        Some coordinates like dipole moments and
        ellipsoid orientations should only be rotated
        (not translated).
        This function outputs a list of lists of triplets of integers.
    """
    def _col(name):
        # Index of the column, or None when it is absent.
        return column_names.index(name) if name in column_names else None

    def _all_or_none(cols, err_msg):
        # Each vector must be fully present or fully absent.
        flags = [c is not None for c in cols]
        if any(flags) and not all(flags):
            raise InputError(err_msg)
        return flags[0]

    vects = []

    mu = [_col('mux'), _col('muy'), _col('muz')]
    if _all_or_none(mu, 'Error(dump2data): custom atom_style list must define mux, muy, and muz or none.\n'):
        vects.append(mu)

    quat = [_col('quati'), _col('quatj'), _col('quatk')]
    if _all_or_none(quat, 'Error(dump2data): custom atom_style list must define quati, quatj, and quatk or none.\n'):
        vects.append(quat)

    return vects
def ParseArgs(argv,
misc_settings,
data_settings,
warning_strings=None):
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by this program.
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom_style') or
(argv[i].lower() == '-atom-style')):
in_init = []
if i + 1 >= len(argv):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by a an atom_style name.\n'
' (Or single quoted string which includes a space-separated\n'
' list of column names.)\n')
data_settings.column_names = AtomStyle2ColNames(argv[i + 1])
sys.stderr.write(' \"Atoms\" column format:\n')
sys.stderr.write(
' ' + (' '.join(data_settings.column_names)) + '\n')
# ColNames2Coords() and ColNames2Vects() generate lists of
# triplets of integers, storing the column numbers containing
# x, y, and z coordinate values, and vx,vy,vz direction vectors.
data_settings.ii_vects = ColNames2Vects(data_settings.column_names)
ii_coords = ColNames2Coords(data_settings.column_names)
# This program assumes that there is only one coordinate triplet
# (x,y,z) for each atom. Hence we assume that len(ii_coords)==1
assert(len(ii_coords) == 1)
data_settings.i_coords = ii_coords[0]
# Now figure out which columns correspond to atomid, atomtype,
# molid
data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid(
data_settings.column_names)
del(argv[i:i + 2])
elif (argv[i].lower() == '-icoord'):
if i + 1 >= len(argv):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers\n'
' corresponding to column numbers for coordinates in\n'
' the \"Atoms\" section of a LAMMPS data file.\n')
ilist = argv[i + 1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers.\n'
' This is usually a list of 3 intebers, but it can contain more.\n'
' The number of cooridnate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
#ii_coords = []
# for i in range(0, len(ilist)/3):
# cols = [ilist[3*i]+1, ilist[3*i+1]+1, ilist[3*i+2]+1]
# ii_coords.append(cols)
# if ((len(ii_coords) != 0) or (len(ii_coords[0]) != 3)):
# raise InputError('Error(dump2data): Argument \"'+argv[i]+'\" must be followed by exactly 3 integers.\n')
data_settings.i_coords = ilist
if (len(i_coords) != 3):
raise InputError('Error(dump2data): Argument \"' +
argv[i] + '\" must be followed by exactly 3 integers.\n')
data_settings.i_coords = ii_coords[0]
del(argv[i:i + 2])
elif (argv[i].lower() == '-ivect'):
if i + 1 >= len(argv):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers\n'
' corresponding to column numbers for direction vectors in\n'
' the \"Atoms\" section of a LAMMPS data file.\n')
ilist = argv[i + 1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers.\n'
' This is usually a list of 3 intebers, but it can contain more.\n'
' The number of cooridnate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
data_settings.ii_vects = []
for i in range(0, len(ilist) / 3):
cols = [ilist[3 * i] + 1, ilist[3 * i + 1] +
1, ilist[3 * i + 2] + 1]
setting.ii_vects.append(cols)
# This should override any earlier settings as a result of the
# -atomstyle argument. So you can specify a custom list of column
# names using -atomstyle "list of column names", and then afterwards
# specify which of these columns correspond to direction vectors
# using the "-ivect" command line argument later on.
# This way, in theory you should be able to read columns from
# new custom atom-styles that have not been invented yet.
# (Although I haven't tested this.)
del(argv[i:i + 2])
# i_atomid is not really needed for this program, but I load it anyway
elif ((argv[i].lower() == '-iatomid') or
(argv[i].lower() == '-iid') or
(argv[i].lower() == '-iatom-id')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"Atoms\" section of a\n'
' LAMMPS data file contains the atom id number (typically 1).\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
i_atomid = int(argv[i + 1]) - 1
del(argv[i:i + 2])
# i_atomtype is not really needed for this program, but I load it
# anyway
elif ((argv[i].lower() == '-iatomtype') or
(argv[i].lower() == '-itype') or
(argv[i].lower() == '-iatom-type')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"Atoms\" section of a\n'
' LAMMPS data file contains the atom type.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
i_atomtype = int(argv[i + 1]) - 1
del(argv[i:i + 2])
# i_molid is not really needed for this program, but I load it anyway
elif ((argv[i].lower() == '-imolid') or
(argv[i].lower() == '-imol') or
(argv[i].lower() == '-imol-id') or
(argv[i].lower() == '-imoleculeid') or
(argv[i].lower() == '-imolecule-id')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"Atoms\" section of a\n'
' LAMMPS data file contains the molecule id number.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
del(argv[i:i + 2])
# Which frame do we want?
elif (argv[i].lower() == '-t'):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer indicating\n'
' the frame you want to extract from the dump file (trajectory).\n'
' This integer should match the timestep corresponding to the frame\n'
' whose coordinates you wish to extract.\n')
misc_settings.timestep_str = argv[i + 1]
del(argv[i:i + 2])
misc_settings.multi = False
misc_settings.last_frame = False
elif (argv[i].lower() == '-tstart'):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer indicating\n'
' the first frame you want to extract from the dump file (trajectory).\n'
' This integer should match the timestep corresponding to the frame\n'
' (after which) you wish to extract coordinates.\n')
misc_settings.tstart = float(argv[i + 1])
del(argv[i:i + 2])
misc_settings.multi = True
elif (argv[i].lower() == '-tstop'):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an number indicating\n'
' the first frame you want to extract from the dump file (trajectory).\n'
' Frames after this timestep will be ignored.\n')
misc_settings.tstop = float(argv[i + 1])
del(argv[i:i + 2])
misc_settings.multi = True
elif (argv[i].lower() == '-center'):
misc_settings.center_frame = True
del(argv[i:i + 1])
elif ((argv[i].lower() == '-raw') or (argv[i].lower() == '-rawout')):
misc_settings.output_format = 'raw'
del(argv[i:i + 1])
elif (argv[i].lower() == '-rawin'):
misc_settings.input_format = 'raw'
misc_settings.multi = False
del(argv[i:i + 1])
elif ((argv[i].lower() == '-xyz') or (argv[i].lower() == '-xyzout')):
misc_settings.output_format = 'xyz'
del(argv[i:i + 1])
elif (argv[i].lower() == '-xyzin'):
misc_settings.input_format = 'xyz'
misc_settings.multi = False
del(argv[i:i + 1])
elif (argv[i].lower() == '-multi'):
misc_settings.multi = True
del(argv[i:i + 1])
elif (argv[i].lower() == '-last'):
misc_settings.last_frame = True
misc_settings.multi = False
del(argv[i:i + 1])
elif (argv[i].lower() == '-interval'):
misc_settings.skip_interval = int(argv[i + 1])
del(argv[i:i + 2])
elif (argv[i].lower() == '-scale'):
misc_settings.scale = float(argv[i + 1])
del(argv[i:i + 2])
elif ((argv[i][0] == '-') and (__name__ == "__main__")):
raise InputError(
'Error(dump2data): Unrecogized command line argument \"' + argv[i] + '\"\n')
else:
i += 1
usage_examples = \
""" Typical usage:
dump2data.py orig_file.data < dump.lammpstrj > new_file.data
(This extracts last frame, uses "full" atom_style.)
Additional options:
dump2data.py -t t -atomstyle style orig.data < dump.lammpstrj > new.data
"""
# if __name__ == "__main__":
if (len(argv) > 2):
# if there are more than 2 remaining arguments,
# AND
# no other function will process the remaining argument list
# (ie. if __name__ == "__main__")
# THEN
raise InputError(' ----\n'
'ERROR(dump2data): You have too many arguments (or unrecognized arguments):\n'
' \"' + (' '.join(argv)) + '\"\n'
' ----\n'
+ usage_examples)
elif (len(argv) < 2):
if misc_settings.output_format == 'data':
raise InputError(' ----\n'
'ERROR(dump2data): Problem with argument list:\n'
' Expected a LAMMPS .data file as an argument.\n'
' ----\n'
+ usage_examples)
else:
in_data_file = open(argv[1], 'r')
data_settings.file_name = argv[1]
data_settings.contents = in_data_file.readlines()
in_data_file.close()
# end of if-then statement for "if __name__ == "__main__""
if len(data_settings.i_coords) == 0:
if warning_strings != None:
warning_strings.append(
'WARNING(dump2data): atom_style unknown. (Use -atomstyle style. Assuming \"full\")')
warn_atom_style_unspecified = True
# The default atom_style is "full"
data_settings.column_names = AtomStyle2ColNames('full')
ii_coords = ColNames2Coords(data_settings.column_names)
# This program assumes that there is only one coordinate triplet
# (x,y,z) for each atom. Hence we assume that len(ii_coords)==1
assert(len(ii_coords) == 1)
data_settings.i_coords = ii_coords[0]
data_settings.ii_vects = ColNames2Vects(data_settings.column_names)
data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid(
data_settings.column_names)
# sys.stderr.write('########################################################\n'
# '## WARNING: atom_style unspecified ##\n'
# '## --> \"Atoms\" column data has an unknown format. ##\n'
# '## Assuming atom_style = \"full\" ##\n'
# '########################################################\n'
# '## To specify the \"Atoms\" column format you can: ##\n'
# '## 1) Use the -atom_style \"STYLE\" argument ##\n'
# '## where \"STYLE\" is a string indicating a LAMMPS ##\n'
# '## atom_style, including hybrid styles.(Standard ##\n'
# '## atom styles defined in 2011 are supported.) ##\n'
# '## 2) Use the -atom_style \"COL_LIST\" argument ##\n'
# '## where \"COL_LIST" is a quoted list of strings ##\n'
# '## indicating the name of each column. ##\n'
# '## Names \"x\",\"y\",\"z\" are interpreted as ##\n'
# '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n'
# '## and \"quati\",\"quatj\",\"quatk\" are ##\n'
# '## interpreted as direction vectors. ##\n'
# '## 3) Use the -icoord \"cx cy cz...\" argument ##\n'
# '## where \"cx cy cz\" is a list of integers ##\n'
# '## indicating the column numbers for the x,y,z ##\n'
# '## coordinates of each atom. ##\n'
# '## 4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n'
# '## where \"cmux cmuy cmuz...\" is a list of ##\n'
# '## integers indicating the column numbers for ##\n'
# '## the vector that determines the direction of a ##\n'
# '## dipole or ellipsoid (ie. a rotateable vector).##\n'
# '## (More than one triplet can be specified. The ##\n'
# '## number of entries must be divisible by 3.) ##\n'
# '## 5) Include a ##\n'
# '## write(\"in_init.txt\"){atom_style ...} ##\n'
# '## statement in your .ttree file. ##\n'
# '########################################################\n')
def GetIntAtomID(pair):
    """Sort-key helper: return the atom id (first element of *pair*,
    stored as a string in the dump/data dictionaries) as an integer."""
    atomid_str = pair[0]
    return int(atomid_str)
def WriteFrameToData(out_file,
                     descr_str,
                     misc_settings,
                     data_settings,
                     natoms,
                     coords,
                     coords_ixiyiz,
                     vects,
                     velocities,
                     atomtypes,
                     molids,
                     xlo_str, xhi_str,
                     ylo_str, yhi_str,
                     zlo_str, zhi_str,
                     xy_str, xz_str, yz_str):
    """
    Open a data file. Read the LAMMPS DATA file line by line.
    When the line contains information which is also in the dump file,
    replace that information with information from the dump file.
    (Information from a dump file is stored in the arguments to this function.)
    The resulting file also has LAMMPS DATA format.

    Arguments (per-frame data extracted from the dump file):
      out_file       -- writable file object receiving the updated DATA text
      descr_str      -- replacement for the first (descriptive) line, or None
      misc_settings  -- MiscSettings object (only .center_frame is read here)
      data_settings  -- DataSettings object; .contents holds the original DATA
                        file lines, and .column_names / .i_coords / .ii_vects /
                        .i_atomtype / .i_molid describe the "Atoms" columns
      natoms         -- number of atoms (not actually read by this function)
      coords         -- {atomid: (x, y, z)} replacement coordinates
      coords_ixiyiz  -- {atomid: [ix, iy, iz]} periodic image counts (strings)
      vects          -- {atomid: [(vx, vy, vz), ...]} direction vectors
                        (dipoles, quaternions, ...), one tuple per vector
      velocities     -- {atomid: (vx, vy, vz)} replacement velocities
      atomtypes      -- {atomid: type-string}; falsy to leave types unchanged
      molids         -- {atomid: molid-string}; falsy to leave molids unchanged
      *_str          -- replacement box-boundary / tilt tokens, or None

    Raises:
      InputError -- when the dump columns are incompatible with the
                    atom_style implied by the DATA file.
    """
    section = ''
    firstline = True
    for line in data_settings.contents:
        # Strip trailing comments and surrounding whitespace before parsing.
        ic = line.find('#')
        if ic != -1:
            line = line[:ic]
        line = line.strip()
        if firstline:  # Construct a new descriptive header line:
            if descr_str != None:
                line = descr_str
            firstline = False
        if (len(line) > 0):
            # The initial section (section='') is assumed to be
            # the "LAMMPS Description" section. This is where the
            # box boundaries are specified.
            if section == '':
                tokens = line.split()
                # Replace "xlo xhi", "ylo yhi", "zlo zhi" and "xy xz yz"
                # header lines with the boundary tokens from the dump file
                # (only when the corresponding replacement strings are set).
                if ((len(tokens) >= 2) and
                    ((tokens[-2] == 'xlo') and (tokens[-1] == 'xhi')) and
                        ((xlo_str != None) and (xhi_str != None))):
                    tokens[0] = xlo_str
                    tokens[1] = xhi_str
                    line = ' '.join(tokens)
                elif ((len(tokens) >= 2) and
                      ((tokens[-2] == 'ylo') and (tokens[-1] == 'yhi')) and
                        ((ylo_str != None) and (yhi_str != None))):
                    tokens[0] = ylo_str
                    tokens[1] = yhi_str
                    line = ' '.join(tokens)
                elif ((len(tokens) >= 2) and
                      ((tokens[-2] == 'zlo') and (tokens[-1] == 'zhi')) and
                        ((zlo_str != None) and (zhi_str != None))):
                    tokens[0] = zlo_str
                    tokens[1] = zhi_str
                    line = ' '.join(tokens)
                elif ((len(tokens) >= 3) and
                      ((tokens[-3] == 'xy') and
                       (tokens[-2] == 'xz') and
                       (tokens[-1] == 'yz')) and
                      ((xy_str != None) and
                       (xz_str != None) and
                       (yz_str != None))):
                    tokens[0] = xy_str
                    tokens[1] = xz_str
                    tokens[2] = yz_str
                    line = ' '.join(tokens)
            # A bare section-title line switches the parser state.
            if (line in set(['Masses', 'Velocities', 'Atoms',
                             'Bond Coeffs', 'Angle Coeffs',
                             'Dihedral Coeffs', 'Improper Coeffs',
                             'Bonds', 'Angles', 'Dihedrals', 'Impropers'])):
                section = line
            else:
                if (section == 'Atoms'):
                    tokens = line.split()
                    atomid = tokens[0]
                    # update the atomtype and molID
                    # (which may change during the simulation)
                    if atomtypes:
                        tokens[data_settings.i_atomtype] = atomtypes[atomid]
                    if molids and data_settings.i_molid:
                        tokens[data_settings.i_molid] = molids[atomid]
                    if atomid in coords:
                        # Loop over all of the vector degrees of
                        # freedom of the particle, excluding coords
                        # (for example: mu_x, mu_y, mu_z,
                        #  or quat_i, quat_j, quat_k)
                        # In principle, depending on the atom_style,
                        # there could be multiple vectors per atom.
                        for I in range(0, len(data_settings.ii_vects)):
                            if atomid in vects:
                                vxvyvz = vects[atomid][I]
                                assert((type(vxvyvz) is tuple) and
                                       (len(vxvyvz) == 3))
                                i_vx = data_settings.ii_vects[I][0]
                                i_vy = data_settings.ii_vects[I][1]
                                i_vz = data_settings.ii_vects[I][2]
                                if ((i_vx >= len(tokens)) or
                                    (i_vy >= len(tokens)) or
                                        (i_vz >= len(tokens))):
                                    raise InputError('Error(dump2data): Atom style incompatible with data file.\n'
                                                     '       Specify the atom_style using -atomstyle style.\n')
                                # Replace the vector components with numbers
                                # from the dump file
                                tokens[i_vx] = vxvyvz[0]
                                tokens[i_vy] = vxvyvz[1]
                                tokens[i_vz] = vxvyvz[2]
                            else:
                                # NOTE(review): this branch reads i_vx from a
                                # previous loop iteration (it is only assigned
                                # in the "atomid in vects" branch) and it
                                # references dump_column_names, which is not a
                                # parameter or local of this function --
                                # likely a latent NameError; confirm.
                                if data_settings.column_names[
                                        i_vx] not in dump_column_names:
                                    raise InputError('Error(dump2data): You have a vector coordinate in your DATA file named \"' + data_settings.column_names[i_vx] + '\"\n'
                                                     '       However there are no columns with this name in your DUMP file\n'
                                                     '       (or the column was not in the expected place).\n'
                                                     '       Hence, the atom styles in the dump and data files do not match.')
                        # Now loop over the coordinates of each atom.
                        # for I in range(0,len(data_settings.ii_coords)):
                        #    xyz = coords[atomid][I]
                        #     THIS LOOP IS SILLY.
                        #     EACH ATOM ONLY HAS ONE SET OF X,Y,Z
                        #     COORDINATES. COMMENTING OUT THIS LOOP:
                        #    i_x = data_settings.ii_coords[I][0]
                        #    i_y = data_settings.ii_coords[I][1]
                        #    i_z = data_settings.ii_coords[I][2]
                        # USING THIS INSTEAD:
                        xyz = coords[atomid]
                        i_x = data_settings.i_coords[0]
                        i_y = data_settings.i_coords[1]
                        i_z = data_settings.i_coords[2]
                        if ((i_x >= len(tokens)) or
                            (i_y >= len(tokens)) or
                                (i_z >= len(tokens))):
                            raise InputError('Error(dump2data): Atom style incompatible with data file.\n'
                                             '       Specify the atom_style using -atomstyle style.\n')
                        # Replace the coordinates with coordinates from
                        # the dump file into tokens[i_x]...
                        tokens[i_x] = str(xyz[0])
                        tokens[i_y] = str(xyz[1])
                        tokens[i_z] = str(xyz[2])
                        # Are there there any integer coords
                        # (ix, iy, iz) in the dump file?
                        if coords_ixiyiz[atomid]:
                            assert(len(coords_ixiyiz[atomid]) == 3)
                            # Integer coords stored in the DATA file too?
                            if len(tokens) == (len(data_settings.column_names) + 3):
                                # Then replace the last 3 columns of the
                                # line in the data file with: ix iy iz
                                tokens[-3] = coords_ixiyiz[atomid][0]
                                tokens[-2] = coords_ixiyiz[atomid][1]
                                tokens[-1] = coords_ixiyiz[atomid][2]
                            else:
                                if (not misc_settings.center_frame):
                                    # Append them to the end of the line:
                                    tokens.append(coords_ixiyiz[atomid][0])
                                    tokens.append(coords_ixiyiz[atomid][1])
                                    tokens.append(coords_ixiyiz[atomid][2])
                        # Now finally paste all the tokens together:
                        line = ' '.join(tokens)
                elif (section == 'Velocities'):
                    tokens = line.split()
                    atomid = tokens[0]
                    if atomid in velocities:
                        vxvyvz = velocities[atomid]
                        if len(tokens) < 4:
                            raise InputError(
                                'Error(dump2data): Not enough columns in the \"Velocities\" file.\n')
                        # Replace the coordinates with coordinates from
                        # the dump file into tokens[i_x]...
                        tokens[1] = str(vxvyvz[0])
                        tokens[2] = str(vxvyvz[1])
                        tokens[3] = str(vxvyvz[2])
                        # Now finally paste all the tokens together:
                        line = ' '.join(tokens)
        out_file.write(line + '\n')
    return
def main():
    """
    Entry point for dump2data: read a LAMMPS dump (trajectory) from stdin,
    parse it frame by frame, and write the selected frame(s) to stdout (or
    to numbered files in -multi data mode) in 'data', 'raw' or 'xyz' format.
    Command-line arguments are parsed by ParseArgs() into MiscSettings and
    DataSettings objects.  Parse errors are reported on stderr and the
    process exits with status -1.
    """
    sys.stderr.write(g_program_name + ' v' +
                     g_version_str + ' ' + g_date_str + ' ')
    # if sys.version < '3':
    #    sys.stderr.write(' (python version < 3)\n')
    # else:
    sys.stderr.write('\n')
    try:
        data_settings = DataSettings()
        misc_settings = MiscSettings()
        warning_strings = []
        ParseArgs(sys.argv,
                  misc_settings,
                  data_settings,
                  warning_strings)
        # Open the lammps dump file (trajectory file)
        # Skip to the line containing the correct frame/timestep.
        # (this is the last frame by default).
        # Read the "BOX BOUNDS" and the "ATOMS" sections.
        # Store the x,y,z coordinates in the "coords" associative array
        # (indexed by atom id, which could be non-numeric in general).
        section = ''
        #coords = defaultdict(list)
        #coords_ixiyiz = defaultdict(list)
        #vects = defaultdict(list)
        #xlo_str = xhi_str = ylo_str = yhi_str = zlo_str = zhi_str = None
        #xy_str = xz_str = yz_str = None
        #natoms = -1
        #timestep_str = ''
        # Per-frame accumulators, keyed by (string) atom id:
        frame_coords = defaultdict(list)
        frame_coords_ixiyiz = defaultdict(list)
        frame_vects = defaultdict(list)
        frame_velocities = defaultdict(list)
        frame_atomtypes = defaultdict(list)
        # NOTE(review): later code reads and writes 'frame_molids' (with an
        # "s", first assigned in the TIMESTEP branch below); this
        # 'frame_molid' looks like a typo and appears unused -- confirm.
        frame_molid = defaultdict(list)
        frame_xlo_str = frame_xhi_str = None
        frame_ylo_str = frame_yhi_str = None
        frame_zlo_str = frame_zhi_str = None
        frame_xy_str = frame_xz_str = frame_yz_str = None
        frame_natoms = -1
        frame_timestep_str = ''
        # Column indices into the dump's "ATOMS" section (-1 == absent):
        i_atomid = i_atomtype = i_molid = -1
        i_x = i_y = i_z = i_xu = i_yu = i_zu = -1
        i_xs = i_ys = i_zs = i_xsu = i_ysu = i_zsu = -1
        dump_column_names = []
        #num_frames_in = -1
        num_frames_out = 0
        finished_reading_frame = False
        read_last_frame = False
        #in_coord_file = open('tmp_atom_coords.dat','r')
        in_coord_file = sys.stdin
        while True:
            line = in_coord_file.readline()
            if line == '':  # if EOF
                if len(frame_coords) > 0:
                    finished_reading_frame = True
                    read_last_frame = True
            line = line.strip()
            if (line.find('ITEM:') == 0):
                # "ITEM:" header lines switch the parser state; the
                # "ITEM: ATOMS" header also declares the column layout.
                section = line
                if (section.find('ITEM: ATOMS ') == 0):
                    dump_column_names = line[12:].split()
                    i_atomid, i_atomtype, i_molid = \
                        ColNames2AidAtypeMolid(dump_column_names)
                    #ii_coords = ColNames2Coords(dump_column_names)
                    x_already_unwrapped = False
                    y_already_unwrapped = False
                    z_already_unwrapped = False
                    # Accept wrapped (x), unwrapped (xu), scaled (xs) or
                    # scaled-unwrapped (xsu) coordinate columns, in that
                    # order of preference.
                    if 'x' in dump_column_names:
                        i_x = dump_column_names.index('x')
                    elif 'xu' in dump_column_names:
                        i_xu = dump_column_names.index('xu')
                        x_already_unwrapped = True
                    elif 'xs' in dump_column_names:
                        i_xs = dump_column_names.index('xs')
                    elif 'xsu' in dump_column_names:
                        i_xsu = dump_column_names.index('xsu')
                        x_already_unwrapped = True
                    else:
                        raise InputError('Error(dump2data): \"ATOMS\" section of dump file lacks a \"x\" column.\n' +
                                         '       (excerpt below)\n' + line)
                    if 'y' in dump_column_names:
                        i_y = dump_column_names.index('y')
                    elif 'yu' in dump_column_names:
                        i_yu = dump_column_names.index('yu')
                        y_already_unwrapped = True
                    elif 'ys' in dump_column_names:
                        i_ys = dump_column_names.index('ys')
                    elif 'ysu' in dump_column_names:
                        i_ysu = dump_column_names.index('ysu')
                        y_already_unwrapped = True
                    else:
                        raise InputError('Error(dump2data): \"ATOMS\" section of dump file lacks a \"y\" column.\n' +
                                         '       (excerpt below)\n' + line)
                    if 'z' in dump_column_names:
                        i_z = dump_column_names.index('z')
                    elif 'zu' in dump_column_names:
                        i_zu = dump_column_names.index('zu')
                        z_already_unwrapped = True
                    elif 'zs' in dump_column_names:
                        i_zs = dump_column_names.index('zs')
                    elif 'zsu' in dump_column_names:
                        i_zsu = dump_column_names.index('zsu')
                        z_already_unwrapped = True
                    else:
                        raise InputError('Error(dump2data): \"ATOMS\" section of dump file lacks a \"z\" column.\n' +
                                         '       (excerpt below)\n' + line)
                    ii_vects = ColNames2Vects(dump_column_names)
                    if (len(ii_vects) != len(data_settings.ii_vects)):
                        raise InputError('Error(dump2data): atom styles in data and dump files differ.\n'
                                         '       Some needed columns from the atom_styles are missing in the dump file.')
                    # Optional periodic-image-count columns:
                    i_ix = i_iy = i_iz = -1
                    if 'ix' in dump_column_names:
                        i_ix = dump_column_names.index('ix')
                    if 'iy' in dump_column_names:
                        i_iy = dump_column_names.index('iy')
                    if 'iz' in dump_column_names:
                        i_iz = dump_column_names.index('iz')
                    # Optional velocity columns:
                    i_vx = i_vy = i_vz = -1
                    if 'vx' in dump_column_names:
                        i_vx = dump_column_names.index('vx')
                    if 'vy' in dump_column_names:
                        i_vy = dump_column_names.index('vy')
                    if 'vz' in dump_column_names:
                        i_vz = dump_column_names.index('vz')
                elif (section.find('ITEM: BOX BOUNDS') == 0):
                    # Reset the box lattice vectors; the components are
                    # filled in from the next three data lines.
                    avec = [1.0, 0.0, 0.0]
                    bvec = [0.0, 1.0, 0.0]
                    cvec = [0.0, 0.0, 1.0]
                elif (section.find('ITEM: TIMESTEP') == 0):
                    if len(frame_coords) > 0:
                        finished_reading_frame = True
            elif ((len(line) > 0) and (line[0] != '#')):
                # Non-header, non-comment line: interpret it according to
                # the current section.
                if (section.find('ITEM: TIMESTEP') == 0):
                    # Beginning of a new frame: reset all per-frame state.
                    finished_reading_frame = False
                    frame_timestep_str = line
                    frame_coords = defaultdict(list)
                    frame_coords_ixiyiz = defaultdict(list)
                    frame_vects = defaultdict(list)
                    frame_velocities = defaultdict(list)
                    frame_atomtypes = defaultdict(list)
                    frame_molids = defaultdict(list)
                    frame_xlo_str = frame_xhi_str = None
                    frame_ylo_str = frame_yhi_str = None
                    frame_zlo_str = frame_zhi_str = None
                    frame_xy_str = frame_xz_str = frame_yz_str = None
                elif (section == 'ITEM: NUMBER OF ATOMS'):
                    frame_natoms = int(line)
                elif (section.find('ITEM: BOX BOUNDS') == 0):
                    # NOTE(review): section always begins with 'ITEM:', so
                    # section.find('xy xz yz') can never equal 0 and
                    # is_triclinic is always False here; this probably should
                    # be `!= -1` -- confirm against the triclinic dump format.
                    is_triclinic = (section.find('xy xz yz') == 0)
                    tokens = line.split()
                    # The three BOX BOUNDS data lines arrive in x, y, z
                    # order; use which *lo_str is still None to tell them
                    # apart.
                    if not frame_xlo_str:
                        assert(not frame_xhi_str)
                        frame_xlo_str = tokens[0]
                        frame_xhi_str = tokens[1]
                        avec[0] = float(frame_xhi_str) - float(frame_xlo_str)
                        if (is_triclinic and (len(tokens) > 2)):
                            frame_xy_str = tokens[2]
                            bvec[0] = float(frame_xy_str)
                            # See http://lammps.sandia.gov/doc/Section-howto.html#howto_12
                        # sys.stderr.write('avec='+str(avec)+'\n')
                    elif not frame_ylo_str:
                        assert(not frame_yhi_str)
                        frame_ylo_str = tokens[0]
                        frame_yhi_str = tokens[1]
                        bvec[1] = float(frame_yhi_str) - float(frame_ylo_str)
                        if (is_triclinic and (len(tokens) > 2)):
                            frame_xz_str = tokens[2]
                            cvec[0] = float(frame_xz_str)
                            # See http://lammps.sandia.gov/doc/Section-howto.html#howto_12
                        # sys.stderr.write('bvec='+str(bvec)+'\n')
                    elif not frame_zlo_str:
                        assert(not frame_zhi_str)
                        frame_zlo_str = tokens[0]
                        frame_zhi_str = tokens[1]
                        cvec = [0.0, 0.0, float(
                            frame_zhi_str) - float(frame_zlo_str)]
                        if (is_triclinic and (len(tokens) > 2)):
                            frame_yz_str = tokens[2]
                            cvec[1] = float(frame_yz_str)
                            # See http://lammps.sandia.gov/doc/Section-howto.html#howto_12
                        # sys.stderr.write('cvec='+str(cvec)+'\n')
                elif (section.find('ITEM: ATOMS') == 0):
                    tokens = line.split()
                    atomid = tokens[i_atomid]
                    atomtype = tokens[i_atomtype]
                    frame_atomtypes[atomid] = atomtype
                    # NOTE(review): i_molid is -1 (truthy) when the molid
                    # column is absent, and 0 (falsy) when it is the first
                    # column; this truthiness test looks inverted for both
                    # edge cases -- confirm against ColNames2AidAtypeMolid.
                    if i_molid:
                        molid = tokens[i_molid]
                        frame_molids[atomid] = molid
                    if ((i_x != -1) and (i_y != -1) and (i_z != -1)):
                        x = float(tokens[i_x])  # i_x determined above
                        y = float(tokens[i_y])
                        z = float(tokens[i_z])
                    elif ((i_xu != -1) and (i_yu != -1) and (i_zu != -1)):
                        x = float(tokens[i_xu])  # i_x determined above
                        y = float(tokens[i_yu])
                        z = float(tokens[i_zu])
                    elif ((i_xs != -1) and (i_ys != -1) and (i_zs != -1)):
                        # Scaled coordinates: convert to Cartesian using the
                        # box lattice vectors.
                        # NOTE(review): xlo_str / ylo_str / zlo_str are not
                        # defined anywhere in main() (only frame_xlo_str etc.
                        # are) -- this branch and the xsu branch below would
                        # raise NameError if reached; confirm.
                        xs = float(tokens[i_xs])  # i_xs determined above
                        ys = float(tokens[i_ys])
                        zs = float(tokens[i_zs])
                        x = float(xlo_str) + xs * \
                            avec[0] + ys * bvec[0] + zs * cvec[0]
                        y = float(ylo_str) + xs * \
                            avec[1] + ys * bvec[1] + zs * cvec[1]
                        z = float(zlo_str) + xs * \
                            avec[2] + ys * bvec[2] + zs * cvec[2]
                        # avec, bvec, cvec described here:
                        # http://lammps.sandia.gov/doc/Section-howto.html#howto_12
                    elif ((i_xsu != -1) and (i_ysu != -1) and (i_zsu != -1)):
                        xsu = float(tokens[i_xsu])  # i_xs determined above
                        ysu = float(tokens[i_ysu])
                        zsu = float(tokens[i_zsu])
                        x = float(xlo_str) + xsu * \
                            avec[0] + ysu * bvec[0] + zsu * cvec[0]
                        y = float(ylo_str) + xsu * \
                            avec[1] + ysu * bvec[1] + zsu * cvec[1]
                        z = float(zlo_str) + xsu * \
                            avec[2] + ysu * bvec[2] + zsu * cvec[2]
                    # Now deal with ix, iy, iz
                    # (periodic image counts: either fold them into the
                    # coordinates, or remember them for the DATA file).
                    if (i_ix != -1) and (not x_already_unwrapped):
                        ix = int(tokens[i_ix])
                        if (misc_settings.center_frame or
                                (misc_settings.output_format != 'data')):
                            #sys.stderr.write('atomid='+str(atomid)+', ix = '+str(ix)+', avec='+str(avec)+'\n')
                            x += ix * avec[0]
                            y += ix * avec[1]
                            z += ix * avec[2]
                        else:
                            if atomid not in frame_coords_ixiyiz:
                                frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
                            frame_coords_ixiyiz[atomid][0] = str(ix)
                    if (i_iy != -1) and (not y_already_unwrapped):
                        iy = int(tokens[i_iy])
                        if (misc_settings.center_frame or
                                (misc_settings.output_format != 'data')):
                            #sys.stderr.write('atomid='+str(atomid)+', iy = '+str(iy)+', bvec='+str(bvec)+'\n')
                            x += iy * bvec[0]
                            y += iy * bvec[1]
                            z += iy * bvec[2]
                        else:
                            if atomid not in frame_coords_ixiyiz:
                                frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
                            frame_coords_ixiyiz[atomid][1] = str(iy)
                    if (i_iz != -1) and (not z_already_unwrapped):
                        iz = int(tokens[i_iz])
                        if (misc_settings.center_frame or
                                (misc_settings.output_format != 'data')):
                            #sys.stderr.write('atomid='+str(atomid)+', iz = '+str(iz)+', cvec='+str(cvec)+'\n')
                            x += iz * cvec[0]
                            y += iz * cvec[1]
                            z += iz * cvec[2]
                        else:
                            if atomid not in frame_coords_ixiyiz:
                                frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
                            frame_coords_ixiyiz[atomid][2] = str(iz)
                    #frame_coords[atomid] = [str(x), str(y), str(z)]
                    frame_coords[atomid] = [x, y, z]
                    vx = 0.0
                    vy = 0.0
                    vz = 0.0
                    if i_vx != -1:
                        vx = float(tokens[i_vx])
                    if i_vy != -1:
                        vy = float(tokens[i_vy])
                    if i_vz != -1:
                        vz = float(tokens[i_vz])
                    frame_velocities[atomid] = [vx, vy, vz]
                    # NOTE:
                    # There can be multiple "vects" associated with each atom
                    # (for example, dipole moments, ellipsoid directions, etc..)
                    if atomid not in frame_vects:
                        frame_vects[atomid] = [
                            None for I in range(0, len(ii_vects))]
                    # NOTE(review): this loop reassigns i_vx/i_vy/i_vz,
                    # clobbering the velocity column indices computed in the
                    # "ITEM: ATOMS " header branch; velocities read for
                    # subsequent atoms would use the wrong columns when
                    # ii_vects is non-empty -- confirm.
                    for I in range(0, len(ii_vects)):
                        i_vx = ii_vects[I][0]
                        i_vy = ii_vects[I][1]
                        i_vz = ii_vects[I][2]
                        vx_str = tokens[i_vx]
                        vy_str = tokens[i_vy]
                        vz_str = tokens[i_vz]
                        # Now the annoying part:
                        #    Which vect is it (mux,muy,muz) or (quati,quatj,quatk)?
                        #    The columns could be listed in a different order
                        #    in the data file and in the dump file.
                        # Figure out which vector it is in the data file (stored
                        # in the integer "I_data") so that column names match.
                        name_vx = dump_column_names[i_vx]
                        name_vy = dump_column_names[i_vy]
                        name_vz = dump_column_names[i_vz]
                        i_vx_data = 0
                        I_data = -1
                        # This code is ugly and inneficient.
                        # I never want to touch this code again. (Hope it
                        # works)
                        while i_vx_data < len(data_settings.column_names):
                            if name_vx == data_settings.column_names[i_vx_data]:
                                I_data = 0
                                while I_data < len(data_settings.ii_vects):
                                    if ii_vects[I] == data_settings.ii_vects[I_data]:
                                        break
                                    I_data += 1
                                if (0 < I_data) and (I_data < len(data_settings.ii_vects)):
                                    break
                            i_vx_data += 1
                        if (0 <= I_data) and (I_data < len(data_settings.ii_vects)):
                            frame_vects[atomid][I_data] = (
                                vx_str, vy_str, vz_str)
                        else:
                            raise InputError('Error(dump2data): You have a vector coordinate in your dump file named \"' + name_vx + '\"\n'
                                             '       However there are no columns with this name in your data file\n'
                                             '       (or the column was not in the expected place).\n'
                                             '       Hence, the atom styles in the dump and data files do not match.')
            if finished_reading_frame:
                # Post-process the completed frame, decide whether to
                # write it out, and write it in the requested format.
                if misc_settings.scale != None:
                    for atomid in frame_coords:
                        for d in range(0, 3):
                            crd = float(frame_coords[atomid][d])
                            frame_coords[atomid][d] = str(
                                crd * misc_settings.scale)
                if len(frame_coords) != frame_natoms:
                    err_msg = 'Number of lines in \"ITEM: ATOMS\" section disagrees with\n' \
                        + ' \"ITEM: NUMBER OF ATOMS\" declared earlier in this file.\n'
                    raise InputError(err_msg)
                if misc_settings.center_frame:
                    # Subtract the (unweighted) centroid from every atom.
                    cm = [0.0, 0.0, 0.0]
                    for atomid in frame_coords:
                        for d in range(0, 3):
                            cm[d] += float(frame_coords[atomid][d])
                    for d in range(0, 3):
                        cm[d] /= float(len(frame_coords))
                    for atomid in frame_coords:
                        for d in range(0, 3):
                            frame_coords[atomid][d] = "%.7g" % (
                                float(frame_coords[atomid][d]) - cm[d])
                        frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
                # NOTE(review): this reuses 'atomid' leaked from the loops
                # above, so only one atom's image counts are zeroed --
                # presumably a loop over all atoms was intended; confirm.
                if misc_settings.output_format != 'data':
                    frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
                # if (num_frames_in == -1):
                #    if (misc_settings.timestep_str != ''):
                #        if (float(frame_timestep_str) >=
                #            float(misc_settings.timestep_str)):
                #            num_frames_in = 1
                #            if not misc_settings.multi:
                #                read_last_frame = True
                #    else:
                #        num_frames_in = 1
                # Should we write out the coordinates in this frame?
                write_this_frame = False
                if misc_settings.multi:
                    write_this_frame = True
                    if (misc_settings.tstart and
                            (int(frame_timestep_str) < misc_settings.tstart)):
                        write_this_frame = False
                    if (misc_settings.tstop and
                            (int(frame_timestep_str) > misc_settings.tstop)):
                        write_this_frame = False
                        read_last_frame = True
                    if misc_settings.tstart:
                        tstart = misc_settings.tstart
                    else:
                        tstart = 0
                    if ((int(frame_timestep_str) - tstart)
                        %
                            misc_settings.skip_interval) != 0:
                        write_this_frame = False
                else:
                    if misc_settings.last_frame:
                        if read_last_frame:
                            write_this_frame = True
                    else:
                        assert(misc_settings.timestep_str)
                        if (int(frame_timestep_str) >=
                                int(misc_settings.timestep_str)):
                            write_this_frame = True
                            read_last_frame = True
                if write_this_frame:
                    num_frames_out += 1
                    sys.stderr.write(' (writing frame ' + str(num_frames_out) +
                                     ' at timestep ' + frame_timestep_str + ')\n')
                    # Print the frame
                    # First check which format to output the data:
                    if misc_settings.output_format == 'raw':
                        # Print out the coordinates in simple 3-column text
                        # format
                        for atomid, xyz in iter(sorted(frame_coords.items(), key=GetIntAtomID)):
                            if misc_settings.scale == None:
                                sys.stdout.write(
                                    str(xyz[0]) + ' ' + str(xyz[1]) + ' ' + str(xyz[2]) + '\n')
                            else:
                                # Only convert to float and back if
                                # misc_settings.scale != None
                                sys.stdout.write(str(misc_settings.scale * float(xyz[0])) + ' ' +
                                                 str(misc_settings.scale * float(xyz[1])) + ' ' +
                                                 str(misc_settings.scale * float(xyz[2])) + '\n')
                        sys.stdout.write('\n')
                    elif misc_settings.output_format == 'xyz':
                        # Print out the coordinates in simple 3-column text
                        # format
                        sys.stdout.write(str(len(frame_coords)) + '\n')
                        descr_str = 'LAMMPS data from timestep ' + frame_timestep_str
                        sys.stdout.write(descr_str + '\n')
                        for atomid, xyz in iter(sorted(frame_coords.items(), key=GetIntAtomID)):
                            if misc_settings.scale == None:
                                sys.stdout.write(str(atomid) + ' ' +
                                                 str(xyz[0]) + ' ' +
                                                 str(xyz[1]) + ' ' +
                                                 str(xyz[2]) + '\n')
                            else:
                                # Only convert to float and back if
                                # misc_settings.scale != None
                                sys.stdout.write(str(atomid) + ' ' +
                                                 str(misc_settings.scale * float(xyz[0])) + ' ' +
                                                 str(misc_settings.scale * float(xyz[1])) + ' ' +
                                                 str(misc_settings.scale * float(xyz[2])) + '\n')
                    else:
                        # Parse the DATA file specified by the user
                        # and replace appropriate lines or fields with
                        # the corresponding text from the DUMP file.
                        descr_str = 'LAMMPS data from timestep ' + frame_timestep_str
                        if misc_settings.multi and (misc_settings.output_format == 'data'):
                            out_file_name = data_settings.file_name + '.'\
                                + str(num_frames_out)
                            sys.stderr.write(
                                ' (creating file \"' + out_file_name + '\")\n')
                            out_file = open(out_file_name, 'w')
                        else:
                            out_file = sys.stdout
                        WriteFrameToData(out_file,
                                         descr_str,
                                         misc_settings,
                                         data_settings,
                                         frame_natoms,
                                         frame_coords,
                                         frame_coords_ixiyiz,
                                         frame_vects,
                                         frame_velocities,
                                         frame_atomtypes,
                                         frame_molids,
                                         frame_xlo_str, frame_xhi_str,
                                         frame_ylo_str, frame_yhi_str,
                                         frame_zlo_str, frame_zhi_str,
                                         frame_xy_str, frame_xz_str, frame_yz_str)
                        # if misc_settings.multi:
                        #    out_file.close()
                # if num_frames_in >= 0:
                #    num_frames_in += 1
                if read_last_frame:
                    # NOTE(review): exiting here means the warning loop below
                    # never runs (the while-loop has no other exit path), and
                    # the builtin exit() is used instead of sys.exit() --
                    # confirm whether the warnings were meant to be printed.
                    exit(0)
        for warning_str in warning_strings:
            sys.stderr.write(warning_str + '\n')
    except (ValueError, InputError) as err:
        sys.stderr.write('\n' + str(err) + '\n')
        sys.exit(-1)
    return
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
alaski/nova | nova/policies/flavors.py | 6 | 1064 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Policy name guarding the flavors API as a whole.
BASE_POLICY_NAME = 'os_compute_api:flavors'
# printf-style template for per-action policy names under the flavors API.
POLICY_ROOT = 'os_compute_api:flavors:%s'


# Default policy rules for the flavors API.
flavors_policies = [
    # 'discoverable': RULE_ANY places no restriction, so any caller may
    # discover that this API extension is available.
    policy.RuleDefault(
        name=POLICY_ROOT % 'discoverable',
        check_str=base.RULE_ANY),
    # Base rule for the flavors API operations: admin or resource owner.
    policy.RuleDefault(
        name=BASE_POLICY_NAME,
        check_str=base.RULE_ADMIN_OR_OWNER),
]
def list_rules():
    """Return the list of oslo.policy RuleDefault objects for the
    flavors API (the module-level ``flavors_policies``), for nova's
    policy registration machinery."""
    return flavors_policies
| apache-2.0 |
JVillella/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py | 141 | 3581 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class InvertBijectorTest(test.TestCase):
  """Tests the correctness of the Y = Invert(bij) transformation."""

  def testBijector(self):
    # For a range of forward bijectors, Invert(fwd) must swap the roles of
    # forward/inverse and of the corresponding log-det-Jacobians.
    with self.test_session():
      for fwd in [
          bijectors.Identity(),
          bijectors.Exp(event_ndims=1),
          bijectors.Affine(
              shift=[0., 1.], scale_diag=[2., 3.], event_ndims=1),
          bijectors.Softplus(event_ndims=1),
          bijectors.SoftmaxCentered(event_ndims=1),
          bijectors.SigmoidCentered(),
      ]:
        rev = bijectors.Invert(fwd)
        # The inverted bijector advertises itself as "invert_<name>".
        self.assertEqual("_".join(["invert", fwd.name]), rev.name)
        x = [[[1., 2.],
              [2., 3.]]]
        self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())
        self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())
        self.assertAllClose(
            fwd.forward_log_det_jacobian(x).eval(),
            rev.inverse_log_det_jacobian(x).eval())
        self.assertAllClose(
            fwd.inverse_log_det_jacobian(x).eval(),
            rev.forward_log_det_jacobian(x).eval())

  def testScalarCongruency(self):
    # Invert(Exp) behaves like log; the shared helper cross-checks forward,
    # inverse and Jacobian consistency over a scalar range.
    with self.test_session():
      bijector = bijectors.Invert(bijectors.Exp())
      assert_scalar_congruency(
          bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)

  def testShapeGetters(self):
    # Event shapes of an inverted bijector are swapped relative to the
    # wrapped bijector, for both static and dynamic shape queries.
    with self.test_session():
      bijector = bijectors.Invert(bijectors.SigmoidCentered(validate_args=True))
      x = tensor_shape.TensorShape([2])
      y = tensor_shape.TensorShape([])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())

  def testDocstringExample(self):
    # Mirrors the Invert docstring: Exp-inverting a Gamma yields an
    # "exp-gamma" transformed distribution whose samples are scalars.
    with self.test_session():
      exp_gamma_distribution = (
          transformed_distribution_lib.TransformedDistribution(
              distribution=gamma_lib.Gamma(concentration=1., rate=2.),
              bijector=bijectors.Invert(bijectors.Exp())))
      self.assertAllEqual(
          [], array_ops.shape(exp_gamma_distribution.sample()).eval())
# Run this test module directly with the TF test runner.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
Gui13/CouchPotatoServer | libs/subliminal/exceptions.py | 170 | 1050 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
class Error(Exception):
    """Root of the subliminal exception hierarchy.

    Catching this type catches every error raised by the package.
    """
class ServiceError(Error):
    """Exception raised by services"""
    pass
class DownloadFailedError(Error):
    """Exception raised when a download task has failed in service"""
    pass
| gpl-3.0 |
Gr8z/Legend-Cogs | profanity/profanity.py | 1 | 2085 | import discord
from discord.ext import commands
from .utils.dataIO import dataIO, fileIO
import os
import asyncio
# Role names whose members are exempt from the profanity filter.
BOTCOMMANDER_ROLES = ["Family Representative", "Clan Manager", "Clan Deputy", "Co-Leader", "Hub Officer", "admin"]
class profanity:
    """profanity!"""

    def __init__(self, bot):
        self.bot = bot
        # Word list maintained on disk; loaded once at cog construction.
        self.bannedwords = dataIO.load_json('data/Profanity/banned_words.json')

    async def banned_words(self, message):
        # Strip common markdown characters so "b*a*d" style evasion is caught,
        # then compare the message words against the banned list.
        banned = set(self.bannedwords)
        stripped = message.content.replace("*", "").replace("_", "").replace("#", "")
        words = set(stripped.split())
        if not (banned & words):
            return
        await self.bot.delete_message(message)
        warning = await self.bot.send_message(
            message.channel,
            "{}, **We do not allow Hateful, obscene, offensive, racist, sexual, or violent words in any public channels.**".format(
                message.author.mention
            )
        )
        # Leave the warning visible briefly, then clean it up too.
        await asyncio.sleep(6)
        await self.bot.delete_message(warning)

    async def on_message_edit(self, before, after):
        # Re-check edited messages so edits cannot sneak banned words in.
        await self.banned_words(after)

    async def on_message(self, message):
        # Never act on the bot's own messages.
        if message.author.id == self.bot.user.id:
            return
        # Members holding any bot-commander role are exempt from filtering.
        exempt_roles = set(
            discord.utils.get(message.server.roles, name=r)
            for r in BOTCOMMANDER_ROLES
        )
        if set(message.author.roles) & exempt_roles:
            return
        await self.banned_words(message)
def check_folders():
    """Ensure the cog's data directory exists, creating it when missing."""
    folder = "data/Profanity"
    if os.path.exists(folder):
        return
    print("Creating data/Profanity folder...")
    os.makedirs(folder)
def check_files():
    # Create an empty banned-word list on first run so the cog can load it.
    f = "data/Profanity/banned_words.json"
    if not fileIO(f, "check"):
        print("Creating empty banned_words.json...")
        fileIO(f, "save", [])
def setup(bot):
    # Standard Red cog entry point: ensure data files exist, then register
    # the cog with the bot.
    check_folders()
    check_files()
    bot.add_cog(profanity(bot))
mseroczynski/platformio | platformio/telemetry.py | 4 | 9325 | # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import atexit
import platform
import Queue
import re
import sys
import threading
import uuid
from collections import deque
from os import getenv
from time import sleep, time
import click
import requests
from platformio import __version__, app, exception, util
from platformio.ide.projectgenerator import ProjectGenerator
class TelemetryBase(object):
    """Common parameter-dictionary behaviour shared by telemetry backends.

    Parameters are exposed through item access: missing keys read as None
    and deleting an absent key is a silent no-op.  Subclasses must
    implement :meth:`send`.
    """

    # Stable per-machine identifier derived from the hardware address.
    MACHINE_ID = str(uuid.uuid5(uuid.NAMESPACE_OID, str(uuid.getnode())))

    def __init__(self):
        self._params = {}

    def __getitem__(self, name):
        return self._params.get(name)

    def __setitem__(self, name, value):
        self._params[name] = value

    def __delitem__(self, name):
        # Deleting a key that was never set is deliberately tolerated.
        self._params.pop(name, None)

    def send(self, hittype):
        raise NotImplementedError()
class MeasurementProtocol(TelemetryBase):
    """Google Analytics Measurement Protocol hit builder.

    Maps friendly parameter names (``event_category`` etc.) onto the short
    wire names GA expects, and pre-fills application, system and screen
    metadata at construction time.
    """

    TRACKING_ID = "UA-1768265-9"

    # Friendly name -> Measurement Protocol short parameter name.
    PARAMS_MAP = {
        "screen_name": "cd",
        "event_category": "ec",
        "event_action": "ea",
        "event_label": "el",
        "event_value": "ev"
    }

    def __init__(self):
        TelemetryBase.__init__(self)
        # Mandatory protocol fields: version, tracking id, client id and
        # "screen resolution" (the terminal size stands in for it).
        self['v'] = 1
        self['tid'] = self.TRACKING_ID
        self['cid'] = self.MACHINE_ID
        self['sr'] = "%dx%d" % click.get_terminal_size()
        self._prefill_screen_name()
        self._prefill_appinfo()
        self._prefill_custom_data()

    def __getitem__(self, name):
        # Translate friendly names to wire names before lookup.
        if name in self.PARAMS_MAP:
            name = self.PARAMS_MAP[name]
        return TelemetryBase.__getitem__(self, name)

    def __setitem__(self, name, value):
        # Translate friendly names to wire names before storing.
        if name in self.PARAMS_MAP:
            name = self.PARAMS_MAP[name]
        TelemetryBase.__setitem__(self, name, value)

    def _prefill_appinfo(self):
        # 'av' = application version; 'an' (application name) is reused to
        # carry dependency version information.
        self['av'] = __version__
        # gather dependent packages
        dpdata = []
        dpdata.append("Click/%s" % click.__version__)
        if app.get_session_var("caller_id"):
            dpdata.append("Caller/%s" % app.get_session_var("caller_id"))
        try:
            # SCons version is probed by running it; failures are ignored.
            result = util.exec_command(["scons", "--version"])
            match = re.search(r"engine: v([\d\.]+)", result['out'])
            if match:
                dpdata.append("SCons/%s" % match.group(1))
        except: # pylint: disable=W0702
            pass
        self['an'] = " ".join(dpdata)

    def _prefill_custom_data(self):
        # Custom dimensions: cd1 = system type, cd2 = Python/platform info,
        # cd4 = whether prompts are enabled or an IDE is the caller.
        self['cd1'] = util.get_systype()
        self['cd2'] = "Python/%s %s" % (platform.python_version(),
                                        platform.platform())
        self['cd4'] = (1 if app.get_setting("enable_prompts") or
                       app.get_session_var("caller_id") else 0)

    def _prefill_screen_name(self):
        # cd3 carries the raw (lower-cased) command line arguments.
        self['cd3'] = " ".join([str(s).lower() for s in sys.argv[1:]])
        if not app.get_session_var("command_ctx"):
            return
        ctx_args = app.get_session_var("command_ctx").args
        args = [str(s).lower() for s in ctx_args if not str(s).startswith("-")]
        if not args:
            return
        # Commands with sub-commands get a two-part screen name,
        # e.g. "Lib Install"; everything else uses just the command name.
        if args[0] in ("lib", "platforms", "serialports", "settings"):
            cmd_path = args[:2]
        else:
            cmd_path = args[:1]
        self['screen_name'] = " ".join([p.title() for p in cmd_path])

    def send(self, hittype):
        # Hits are dropped entirely when telemetry is disabled.
        if not app.get_setting("enable_telemetry"):
            return
        self['t'] = hittype
        # correct queue time: convert an absolute timestamp into the
        # elapsed-milliseconds value the protocol expects.
        if "qt" in self._params and isinstance(self['qt'], float):
            self['qt'] = int((time() - self['qt']) * 1000)
        MPDataPusher().push(self._params)
@util.singleton
class MPDataPusher(object):
    """Background pusher that POSTs Measurement Protocol hits to GA.

    Hits are queued and drained by up to MAX_WORKERS daemon threads.
    Hits not yet confirmed sent are tracked in ``_failedque`` so they can
    be backed up at exit and replayed on a later run.
    """

    MAX_WORKERS = 5

    def __init__(self):
        self._queue = Queue.LifoQueue()
        self._failedque = deque()
        self._http_session = requests.Session()
        # Set to True after the first failed POST; subsequent hits then go
        # straight to the failed queue without touching the network.
        self._http_offline = False
        self._workers = []

    def push(self, item):
        # if network is off-line
        if self._http_offline:
            if "qt" not in item:
                item['qt'] = time()
            self._failedque.append(item)
            return
        self._queue.put(item)
        self._tune_workers()

    def in_wait(self):
        # Number of queued-but-not-yet-processed hits.
        return self._queue.unfinished_tasks

    def get_items(self):
        # Snapshot of everything not confirmed sent: failed hits plus
        # whatever is still sitting in the queue.
        items = list(self._failedque)
        try:
            while True:
                items.append(self._queue.get_nowait())
        except Queue.Empty:
            pass
        return items

    def _tune_workers(self):
        # Drop finished threads, then grow the pool up to MAX_WORKERS (or
        # the current queue size, whichever is smaller).
        for i, w in enumerate(self._workers):
            if not w.is_alive():
                del self._workers[i]
        need_nums = min(self._queue.qsize(), self.MAX_WORKERS)
        active_nums = len(self._workers)
        if need_nums <= active_nums:
            return
        for i in range(need_nums - active_nums):
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()
            self._workers.append(t)

    def _worker(self):
        while True:
            item = self._queue.get()
            _item = item.copy()
            # Tentatively record the hit as failed (with its queue time);
            # it is removed again only after a successful POST.
            if "qt" not in _item:
                _item['qt'] = time()
            self._failedque.append(_item)
            if self._send_data(item):
                self._failedque.remove(_item)
            else:
                self._http_offline = True
            self._queue.task_done()

    def _send_data(self, data):
        # POST one hit to the GA collect endpoint; True on HTTP success.
        result = False
        try:
            r = self._http_session.post(
                "https://ssl.google-analytics.com/collect",
                data=data,
                headers=util.get_request_defheaders(),
                timeout=2
            )
            r.raise_for_status()
            result = True
        except: # pylint: disable=W0702
            pass
        return result
def on_command():
    """Report the current CLI invocation as a GA screenview."""
    # Flush any hits that were backed up during earlier (offline) runs.
    resend_backuped_reports()
    mp = MeasurementProtocol()
    mp.send("screenview")
    # Additionally tag CI environments and known IDE callers.
    if util.is_ci():
        measure_ci()
    if app.get_session_var("caller_id"):
        measure_caller(app.get_session_var("caller_id"))
def measure_ci():
    """Report which CI system (if any) is running the current build."""
    event = {
        "category": "CI",
        "action": "NoName",
        "label": None
    }
    # Per-CI environment markers and the repo-slug variable for each.
    envmap = {
        "APPVEYOR": {"label": getenv("APPVEYOR_REPO_NAME")},
        "CIRCLECI": {"label": "%s/%s" % (getenv("CIRCLE_PROJECT_USERNAME"),
                                         getenv("CIRCLE_PROJECT_REPONAME"))},
        "TRAVIS": {"label": getenv("TRAVIS_REPO_SLUG")},
        "SHIPPABLE": {"label": getenv("REPO_NAME")},
        "DRONE": {"label": getenv("DRONE_REPO_SLUG")}
    }
    for ci_name in envmap:
        if getenv(ci_name, "").lower() == "true":
            event["action"] = ci_name
            event["label"] = envmap[ci_name]['label']
    # A single event is always sent; "NoName" means no known CI matched.
    on_event(**event)
def measure_caller(calller_id):
    # NOTE(review): the parameter name 'calller_id' is a long-standing typo;
    # kept as-is to stay keyword-compatible with existing callers.
    # Normalize: at most 20 characters, lower-cased.
    calller_id = str(calller_id)[:20].lower()
    event = {
        "category": "Caller",
        "action": "Misc",
        "label": calller_id
    }
    # Known IDE callers are reported under the "IDE" action instead.
    if calller_id in ProjectGenerator.get_supported_ides():
        event['action'] = "IDE"
    on_event(**event)
def on_run_environment(options, targets):
    """Report a build/run environment as a GA "Env" event.

    The event action is the (titled) target list; the label encodes the
    environment options as a sorted, "&"-joined key=value string.
    """
    opts_label = "&".join(
        "%s=%s" % (name, value) for name, value in sorted(options.items())
    )
    action = " ".join(t.title() for t in (targets or ["run"]))
    on_event("Env", action, opts_label)
def on_event(category, action, label=None, value=None, screen_name=None):
    """Send a GA event hit, truncating each field to its protocol limit."""
    hit = MeasurementProtocol()
    hit['event_category'] = category[:150]
    hit['event_action'] = action[:500]
    if label:
        hit['event_label'] = label[:500]
    if value:
        hit['event_value'] = int(value)
    if screen_name:
        hit['screen_name'] = screen_name[:2048]
    hit.send("event")
def on_exception(e):
    """Report an unhandled exception as a GA exception hit."""
    # User-initiated aborts are expected and not worth reporting.
    if isinstance(e, exception.AbortedByUser):
        return
    mp = MeasurementProtocol()
    mp['exd'] = "%s: %s" % (type(e).__name__, e)
    # 'exf' marks the exception as fatal when it is not one of our own types.
    mp['exf'] = int(not isinstance(e, exception.PlatformioException))
    mp.send("exception")
@atexit.register
def _finalize():
    # At interpreter exit, give the worker threads a short grace period to
    # drain the queue, then persist anything still unsent for the next run.
    timeout = 1000  # msec
    elapsed = 0
    while elapsed < timeout:
        if not MPDataPusher().in_wait():
            break
        sleep(0.2)
        elapsed += 200
    backup_reports(MPDataPusher().get_items())
def backup_reports(items):
    """Persist unsent telemetry hits in the application state.

    Static, machine-constant parameters are stripped before saving (they
    are re-filled on resend) and the queue time ("qt") is normalized to a
    UNIX timestamp so the hit can be replayed later.  Only the most recent
    KEEP_MAX_REPORTS entries are kept.
    """
    if not items:
        return
    KEEP_MAX_REPORTS = 100
    tm = app.get_state_item("telemetry", {})
    if "backup" not in tm:
        tm['backup'] = []
    for params in items:
        # Skip static options.  Iterate over a snapshot of the keys:
        # deleting from a dict while iterating its live key view raises
        # RuntimeError on Python 3.
        for key in list(params.keys()):
            if key in ("v", "tid", "cid", "cd1", "cd2", "sr", "an"):
                del params[key]
        # Store time in UNIX format.
        if "qt" not in params:
            params['qt'] = time()
        elif not isinstance(params['qt'], float):
            params['qt'] = time() - (params['qt'] / 1000)
        tm['backup'].append(params)
    # Cap the backlog at the most recent KEEP_MAX_REPORTS entries.
    tm['backup'] = tm['backup'][-KEEP_MAX_REPORTS:]
    app.set_state_item("telemetry", tm)
def resend_backuped_reports():
    """Replay hits persisted by backup_reports(); False when none exist."""
    tm = app.get_state_item("telemetry", {})
    if "backup" not in tm or not tm['backup']:
        return False
    for report in tm['backup']:
        mp = MeasurementProtocol()
        for key, value in report.items():
            mp[key] = value
        # 't' holds the original hit type (screenview/event/exception).
        mp.send(report['t'])
    # clean
    tm['backup'] = []
    app.set_state_item("telemetry", tm)
| mit |
jchodera/ensembler | ensembler/cli_commands/testrun_pipeline.py | 3 | 3371 | import os
import ensembler.cli_commands
# CLI help text shown for the 'testrun_pipeline' ensembler command.
helpstring = """\
Conduct a testrun of the entire ensembler pipeline in the current directory,
using the target protein EGFR_HUMAN, and one template structure from AURKB_HUMAN.
"""
# This command defines no extra docopt options.
docopt_helpstring = ''
def dispatch(args):
    """Run the full ensembler pipeline end-to-end as a smoke test.

    Uses EGFR_HUMAN as the target and a single AURKB_HUMAN structure as the
    template, driving every pipeline stage through the same CLI dispatch
    functions a user would invoke.
    """
    # Stage 1: create a new ensembler project in the current directory.
    ensembler.cli_commands.init.dispatch(
        args={
            '--project_dir': '.',
        }
    )
    # Stage 2: fetch the target sequence (EGFR kinase domain) from UniProt.
    ensembler.cli_commands.gather_targets.dispatch(
        args={
            '--gather_from': 'uniprot',
            '--query': 'mnemonic:EGFR_HUMAN',
            '--uniprot_domain_regex': '^Protein kinase',
        }
    )
    # Bundled example PDB/SIFTS files are used so the template structures
    # do not have to be downloaded.
    structure_paths = ','.join([
        os.path.join(ensembler.core.installation_toplevel_dir, 'tests', 'example_project', 'structures', 'pdb'),
        os.path.join(ensembler.core.installation_toplevel_dir, 'tests', 'example_project', 'structures', 'sifts'),
    ])
    # Stage 3: gather the single AURKB_HUMAN template.
    ensembler.cli_commands.gather_templates.dispatch(
        args={
            '--gather_from': 'uniprot',
            '--query': 'mnemonic:AURKB_HUMAN',
            '--uniprot_domain_regex': '^Protein kinase',
            '--structure_paths': structure_paths,
        }
    )
    # Loop modelling is currently disabled for the testrun.
    # ensembler.cli_commands.loopmodel.dispatch(
    #     args={
    #         '--templates': 'AURKB_HUMAN_D0_4AF3_A',
    #         '--overwrite_structures': True,
    #         '--verbose': False,
    #     }
    # )
    # Stage 4: align target against the template.
    ensembler.cli_commands.align.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--templates': 'AURKB_HUMAN_D0_4AF3_A',
            '--verbose': False,
        }
    )
    # Stage 5: build comparative models.
    ensembler.cli_commands.build_models.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--templates': 'AURKB_HUMAN_D0_4AF3_A',
            '--model_seqid_cutoff': None,
            '--verbose': False,
        }
    )
    # Stage 6: cluster the built models.
    ensembler.cli_commands.cluster.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--verbose': False,
        }
    )
    # Stage 7: short implicit-solvent refinement (simlength 1.0).
    ensembler.cli_commands.refine_implicit.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--templates': 'AURKB_HUMAN_D0_4AF3_A',
            '--model_seqid_cutoff': None,
            '--verbose': False,
            '--openmm_platform': False,
            '--gpupn': False,
            '--simlength': '1.0',
        }
    )
    # Stage 8: solvate the refined models.
    ensembler.cli_commands.solvate.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--templates': 'AURKB_HUMAN_D0_4AF3_A',
            '--model_seqid_cutoff': None,
            '--verbose': False,
        }
    )
    # Stage 9: short explicit-solvent refinement (simlength 1.0).
    ensembler.cli_commands.refine_explicit.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--templates': 'AURKB_HUMAN_D0_4AF3_A',
            '--model_seqid_cutoff': None,
            '--verbose': False,
            '--openmm_platform': False,
            '--gpupn': False,
            '--simlength': '1.0',
        }
    )
    # Stage 10: package the final models (e.g. for Folding@home).
    ensembler.cli_commands.package_models.dispatch(
        args={
            '--targetsfile': False,
            '--targets': False,
            '--templates': 'AURKB_HUMAN_D0_4AF3_A',
            '--model_seqid_cutoff': None,
            '--nFAHclones': False,
            '--archiveFAHproject': False,
        }
    )
finway-china/p2pool | nattraverso/portmapper.py | 288 | 4157 | """
Generic NAT Port mapping interface.
TODO: Example
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from twisted.internet.base import BasePort
# Public API
def get_port_mapper(proto="TCP"):
    """
    Returns a L{NATMapper} instance, suited to map a port for
    the given protocol. Defaults to TCP.

    For the moment, only upnp mapper is available. It accepts both UDP and TCP.

    @param proto: The protocol: 'TCP' or 'UDP'
    @type proto: string
    @return: A deferred called with a L{NATMapper} instance
    @rtype: L{twisted.internet.defer.Deferred}
    """
    # NOTE(review): 'proto' is currently unused -- the UPnP mapper handles
    # both protocols, as documented above.
    # Imported lazily so the UPnP machinery is only loaded on demand.
    import nattraverso.pynupnp
    return nattraverso.pynupnp.get_port_mapper()
class NATMapper:
    """
    Define methods to map port objects (as returned by twisted's listenXX).
    This allows NAT to be traversed from incoming packets.

    Currently the only implementation of this class is the UPnP Mapper, which
    can map UDP and TCP ports, if an UPnP Device exists.
    """
    def __init__(self):
        # Abstract interface: concrete mappers must be obtained through
        # get_port_mapper(), never by instantiating this class directly.
        raise NotImplementedError("Cannot instantiate the class")

    def map(self, port):
        """
        Create a mapping for the given twisted's port object.

        The deferred will call back with a tuple (extaddr, extport):
            - extaddr: The ip string of the external ip address of this host
            - extport: the external port number used to map the given Port object

        When called multiple times with the same Port,
        callback with the existing mapping.

        @param port: The port object to map
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @return: A deferred called with the above defined tuple
        @rtype: L{twisted.internet.defer.Deferred}
        """
        raise NotImplementedError

    def info(self, port):
        """
        Returns the existing mapping for the given port object. That means map()
        has to be called before.

        @param port: The port object to retreive info from
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @raise ValueError: When there is no such existing mapping
        @return: a tuple (extaddress, extport).
        @see: L{map() function<map>}
        """
        raise NotImplementedError

    def unmap(self, port):
        """
        Remove an existing mapping for the given twisted's port object.

        @param port: The port object to unmap
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @return: A deferred called with None
        @rtype: L{twisted.internet.defer.Deferred}
        @raise ValueError: When there is no such existing mapping
        """
        raise NotImplementedError

    def get_port_mappings(self):
        """
        Returns a deferred that will be called with a dictionnary of the
        existing mappings.

        The dictionnary structure is the following:
            - Keys: tuple (protocol, external_port)
                - protocol is "TCP" or "UDP".
                - external_port is the external port number, as see on the
                  WAN side.
            - Values:tuple (internal_ip, internal_port)
                - internal_ip is the LAN ip address of the host.
                - internal_port is the internal port number mapped
                  to external_port.

        @return: A deferred called with the above defined dictionnary
        @rtype: L{twisted.internet.defer.Deferred}
        """
        raise NotImplementedError

    def _check_valid_port(self, port):
        """Various Port object validity checks. Raise a ValueError."""
        # Shared precondition helper for concrete mapper implementations.
        if not isinstance(port, BasePort):
            raise ValueError("expected a Port, got %r"%(port))
        if not port.connected:
            raise ValueError("Port %r is not listening"%(port))
        loc_addr = port.getHost()
        if loc_addr.port == 0:
            raise ValueError("Port %r has port number of 0"%(port))
| gpl-3.0 |
re4lfl0w/pyconkr-2015 | pyconkr/tests.py | 6 | 1158 | # -*- coding: utf-8 -*-
from django.test import TestCase
from django.http import HttpResponse
from django.test import Client
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib.auth import get_user_model
from pyconkr.helper import render_io_error
# Resolve the project's configured user model once at import time.
User = get_user_model()
class HelperFunctionTestCase(TestCase):
    """Unit tests for helpers in pyconkr.helper."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_render_io_error(self):
        # render_io_error wraps the reason in a 406 Not Acceptable response.
        a = render_io_error("test reason")
        self.assertEqual(a.status_code, 406, "render io error status code must be 406")
class PaymentTestCase(TestCase):
    """Tests for the registration payment view (requires a logged-in user)."""

    def setUp(self):
        # Create and log in a throwaway user for the payment flow.
        self.client = Client()
        self.user = User.objects.create_user('testname', 'test@test.com', 'testpassword')
        self.client.login(username='testname', password='testpassword')

    def tearDown(self):
        pass

    def test_view_registration_payment(self):
        # The payment endpoint is AJAX-only and must answer with JavaScript.
        url = reverse('registration_payment')
        response = self.client.post(url, {'test': 1})
        self.assertEqual(response['content-type'], 'application/javascript', 'error raise and must be ajax' )
        print response.content
| mit |
ojengwa/odoo | addons/l10n_fr/report/compute_resultant_report.py | 374 | 4004 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class cdr(base_report.base_report):
    """French 'compte de resultat' (income statement) report parser.

    Aggregates the individual 'cdrcN' (charges/expenses) and 'cdrpN'
    (produits/income) line values loaded by _load() into the subtotal and
    total variables consumed by the report template.
    """
    def __init__(self, cr, uid, name, context):
        super(cdr, self).__init__(cr, uid, name, context)

    def set_context(self, objects, data, ids):
        super(cdr, self).set_context(objects, data, ids)
        # Load the raw per-line values from the wizard form into
        # self.localcontext.
        self._load('cdr', self.localcontext['data']['form'])
        # Charge subtotal ct1: lines c1..c15.
        self._set_variable(
            'ct1',
            self.localcontext['cdrc1']+self.localcontext['cdrc2']+self.localcontext['cdrc3']+
            self.localcontext['cdrc4']+self.localcontext['cdrc5']+self.localcontext['cdrc6']+
            self.localcontext['cdrc7']+self.localcontext['cdrc8']+self.localcontext['cdrc9']+
            self.localcontext['cdrc10']+self.localcontext['cdrc11']+self.localcontext['cdrc12']+
            self.localcontext['cdrc13']+self.localcontext['cdrc14']+self.localcontext['cdrc15']
        )
        # Charge subtotal ct3: lines c17..c20.
        self._set_variable(
            'ct3',
            self.localcontext['cdrc17']+self.localcontext['cdrc18']+self.localcontext['cdrc19']+
            self.localcontext['cdrc20']
        )
        # Charge subtotal ct4: lines c21..c23.
        self._set_variable(
            'ct4',
            self.localcontext['cdrc21']+self.localcontext['cdrc22']+self.localcontext['cdrc23']
        )
        # Total charges = ct1 + c16 + ct3 + ct4 + c24 + c25.
        self._set_variable(
            'charges',
            self.localcontext['ct1']+self.localcontext['cdrc16']+self.localcontext['ct3']+
            self.localcontext['ct4']+self.localcontext['cdrc24']+self.localcontext['cdrc25']
        )
        # Produit subtotals: pta (p1..p2), ptb (p3..p7), pt1 = pta + ptb.
        self._set_variable(
            'pta',
            self.localcontext['cdrp1']+self.localcontext['cdrp2']
        )
        self._set_variable(
            'ptb',
            self.localcontext['cdrp3']+self.localcontext['cdrp4']+self.localcontext['cdrp5']+
            self.localcontext['cdrp6']+self.localcontext['cdrp7']
        )
        self._set_variable(
            'pt1',
            self.localcontext['pta']+self.localcontext['ptb']
        )
        # Produit subtotal pt3: lines p9..p14.
        self._set_variable(
            'pt3',
            self.localcontext['cdrp9']+self.localcontext['cdrp10']+self.localcontext['cdrp11']+
            self.localcontext['cdrp12']+self.localcontext['cdrp13']+self.localcontext['cdrp14']
        )
        # Produit subtotal pt4: lines p15..p17.
        self._set_variable(
            'pt4',
            self.localcontext['cdrp15']+self.localcontext['cdrp16']+self.localcontext['cdrp17']
        )
        # Total produits = pt1 + p8 + pt3 + pt4.
        self._set_variable(
            'produits',
            self.localcontext['pt1']+self.localcontext['cdrp8']+self.localcontext['pt3']+
            self.localcontext['pt4']
        )
class wrapped_report_resultat(osv.AbstractModel):
    """QWeb wrapper exposing the cdr parser as report_l10nfrresultat."""
    _name = 'report.l10n_fr.report_l10nfrresultat'
    _inherit = 'report.abstract_report'
    _template = 'l10n_fr.report_l10nfrresultat'
    _wrapped_report_class = cdr
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
doomsterinc/odoo | addons/point_of_sale/wizard/pos_payment.py | 344 | 4976 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
from openerp.tools.translate import _
class account_journal(osv.osv):
    _inherit = 'account.journal'

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Restrict journal search to the journals of the active POS session.

        When 'pos_session_id' is present in the context, only journals
        configured on that session's POS config are returned.
        """
        if not context:
            context = {}
        session_id = context.get('pos_session_id', False) or False
        if session_id:
            session = self.pool.get('pos.session').browse(cr, uid, session_id, context=context)
            if session:
                journal_ids = [journal.id for journal in session.config_id.journal_ids]
                args += [('id', 'in', journal_ids)]
        return super(account_journal, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
class pos_make_payment(osv.osv_memory):
    _name = 'pos.make.payment'
    _description = 'Point of Sale Payment'

    def check(self, cr, uid, ids, context=None):
        """Check the order:
        if the order is not paid: continue payment,
        if the order is paid print ticket.
        """
        context = context or {}
        order_obj = self.pool.get('pos.order')
        active_id = context and context.get('active_id', False)
        order = order_obj.browse(cr, uid, active_id, context=context)
        # Outstanding balance of the active order.
        amount = order.amount_total - order.amount_paid
        data = self.read(cr, uid, ids, context=context)[0]
        # this is probably a problem of osv_memory as it's not compatible with normal OSV's
        data['journal'] = data['journal_id'][0]
        if amount != 0.0:
            order_obj.add_payment(cr, uid, active_id, data, context=context)
        if order_obj.test_paid(cr, uid, [active_id]):
            # Fully paid: advance the order workflow and close the wizard.
            order_obj.signal_workflow(cr, uid, [active_id], 'paid')
            return {'type' : 'ir.actions.act_window_close' }
        # Balance remains: reopen the payment wizard for another payment.
        return self.launch_payment(cr, uid, ids, context=context)

    def launch_payment(self, cr, uid, ids, context=None):
        # Action descriptor that re-opens this wizard in a new dialog.
        return {
            'name': _('Payment'),
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'pos.make.payment',
            'view_id': False,
            'target': 'new',
            'views': False,
            'type': 'ir.actions.act_window',
            'context': context,
        }

    def print_report(self, cr, uid, ids, context=None):
        # Action descriptor for printing the POS receipt of the active order.
        active_id = context.get('active_id', [])
        datas = {'ids' : [active_id]}
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'pos.receipt',
            'datas': datas,
        }

    def _default_journal(self, cr, uid, context=None):
        # Default payment journal: the first journal configured on the POS
        # session of the active order, if any.
        if not context:
            context = {}
        session = False
        order_obj = self.pool.get('pos.order')
        active_id = context and context.get('active_id', False)
        if active_id:
            order = order_obj.browse(cr, uid, active_id, context=context)
            session = order.session_id
        if session:
            for journal in session.config_id.journal_ids:
                return journal.id
        return False

    def _default_amount(self, cr, uid, context=None):
        # Default payment amount: the active order's outstanding balance.
        order_obj = self.pool.get('pos.order')
        active_id = context and context.get('active_id', False)
        if active_id:
            order = order_obj.browse(cr, uid, active_id, context=context)
            return order.amount_total - order.amount_paid
        return False

    _columns = {
        'journal_id' : fields.many2one('account.journal', 'Payment Mode', required=True),
        'amount': fields.float('Amount', digits=(16,2), required= True),
        'payment_name': fields.char('Payment Reference'),
        'payment_date': fields.date('Payment Date', required=True),
    }
    _defaults = {
        'journal_id' : _default_journal,
        'payment_date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'amount': _default_amount,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/__future__.py | 134 | 4584 | """Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Future instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
# Canonical list of every feature name ever recognised; per the module
# docstring, names are never removed from this file.
all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
    "barry_as_FLUFL",
]

__all__ = ["all_feature_names"] + all_feature_names

# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010                      # nested_scopes
CO_GENERATOR_ALLOWED = 0                # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000             # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000      # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000       # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000      # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000    # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x40000       # barry_as_FLUFL (PEP 401)
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
# One record per feature: (OptionalRelease, MandatoryRelease, CompilerFlag).
nested_scopes = _Feature((2, 1, 0, "beta", 1),
                         (2, 2, 0, "alpha", 0),
                         CO_NESTED)

generators = _Feature((2, 2, 0, "alpha", 1),
                      (2, 3, 0, "final", 0),
                      CO_GENERATOR_ALLOWED)

division = _Feature((2, 2, 0, "alpha", 2),
                    (3, 0, 0, "alpha", 0),
                    CO_FUTURE_DIVISION)

absolute_import = _Feature((2, 5, 0, "alpha", 1),
                           (3, 0, 0, "alpha", 0),
                           CO_FUTURE_ABSOLUTE_IMPORT)

with_statement = _Feature((2, 5, 0, "alpha", 1),
                          (2, 6, 0, "alpha", 0),
                          CO_FUTURE_WITH_STATEMENT)

print_function = _Feature((2, 6, 0, "alpha", 2),
                          (3, 0, 0, "alpha", 0),
                          CO_FUTURE_PRINT_FUNCTION)

unicode_literals = _Feature((2, 6, 0, "alpha", 2),
                            (3, 0, 0, "alpha", 0),
                            CO_FUTURE_UNICODE_LITERALS)

barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
                          (3, 9, 0, "alpha", 0),
                          CO_FUTURE_BARRY_AS_BDFL)
andersx/cclib | src/cclib/method/population.py | 5 | 3285 | # -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Population analyses based on cclib data."""
import logging
import numpy
from .calculationmethod import Method
class Population(Method):
    """An abstract base class for population-type methods.

    Subclasses provide a ``calculate`` method that fills ``self.aoresults``
    with per-orbital contributions; ``partition`` then condenses those into
    per-atom (or per-fragment) sums stored in ``self.fragresults``.
    """

    def __init__(self, data, progress=None,
                 loglevel=logging.INFO, logname="Log"):
        """Initialize the population analysis.

        Inputs:
            data     -- parsed data object to analyze
            progress -- optional progress reporter forwarded to Method
            loglevel -- logging level (default: logging.INFO)
            logname  -- logger name (default: "Log")
        """
        # Call the __init__ method of the superclass.
        super(Population, self).__init__(data, progress, loglevel, logname)
        # Per-fragment results; populated by partition().
        self.fragresults = None

    def __str__(self):
        """Return a string representation of the object."""
        return "Population"

    def __repr__(self):
        """Return a representation of the object."""
        return "Population"

    def partition(self, indices=None):
        """Sum the per-orbital results in ``aoresults`` over groups of orbitals.

        Keyword arguments:
            indices -- list of lists of orbital indices, one sublist per
                       group; when omitted, groups are built automatically
                       from the basis-function names (one group per atom
                       for aonames, per fragment for fonames).

        Sets ``self.fragresults`` to a list of (n_mos, n_groups) arrays
        (one per spin) and returns True.

        Raises:
            AttributeError -- if indices are not given and the parsed data
                provides neither aonames nor fonames.
        """
        # Make sure the per-orbital results exist (computed by subclasses).
        if not hasattr(self, "aoresults"):
            self.calculate()

        if not indices:
            # Build list of groups of orbitals in each atom for atomresults.
            # Names look like "<group>_<rest>"; group on the text before '_'.
            if hasattr(self.data, "aonames"):
                names = self.data.aonames
            elif hasattr(self.data, "fonames"):
                names = self.data.fonames
            else:
                # Previously this fell through and raised an obscure
                # UnboundLocalError on `names`; fail explicitly instead.
                raise AttributeError(
                    "Parsed data contains neither aonames nor fonames; "
                    "cannot partition without explicit indices.")

            atoms = []
            indices = []

            name = names[0].split('_')[0]
            atoms.append(name)
            indices.append([0])

            for i in range(1, len(names)):
                name = names[i].split('_')[0]
                try:
                    index = atoms.index(name)
                except ValueError:  # not found in atom list
                    atoms.append(name)
                    indices.append([i])
                else:
                    indices[index].append(i)

        natoms = len(indices)

        # Build results numpy arrays, one per spin, shaped (n_mos, n_groups).
        alpha = len(self.aoresults[0])
        results = [numpy.zeros([alpha, natoms], "d")]
        if len(self.aoresults) == 2:
            beta = len(self.aoresults[1])
            results.append(numpy.zeros([beta, natoms], "d"))

        # For each spin, accumulate the orbital columns belonging to each group.
        for spin in range(len(results)):
            for i in range(natoms):
                for j in indices[i]:
                    results[spin][:, i] = numpy.add(results[spin][:, i],
                                                    self.aoresults[spin][:, j])

        self.logger.info("Saving partitioned results in fragresults: [array[2]]")
        self.fragresults = results

        return True
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    import population
    doctest.testmod(population, verbose=False)
| lgpl-2.1 |
mxia/engine | build/gyp_environment.py | 101 | 1320 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Sets up various automatic gyp environment variables. These are used by
gyp_chromium and landmines.py which run at different stages of runhooks. To
make sure settings are consistent between them, all setup should happen here.
"""
import gyp_helper
import os
import sys
import vs_toolchain
def SetEnvironment():
  """Sets defaults for GYP_* variables.

  Applies chromium.gyp_env, then defaults GYP_GENERATORS to 'ninja' where
  appropriate, and finally sets up the Visual Studio toolchain environment.
  """
  gyp_helper.apply_chromium_gyp_env()

  # Default to ninja on linux and windows, but only if no generator has
  # explicitly been set.
  # Also default to ninja on mac, but only when not building chrome/ios.
  #  . -f / --format has precedence over the env var, no need to check for it
  #  . set the env var only if it hasn't been set yet
  #  . chromium.gyp_env has been applied to os.environ at this point already
  if (sys.platform.startswith(('linux', 'win', 'freebsd')) and
      not os.environ.get('GYP_GENERATORS')):
    os.environ['GYP_GENERATORS'] = 'ninja'
  elif (sys.platform == 'darwin' and
        not os.environ.get('GYP_GENERATORS') and
        # GYP_DEFINES is a string; default to '' rather than the original [],
        # which made the membership test type-inconsistent (same result).
        'OS=ios' not in os.environ.get('GYP_DEFINES', '')):
    os.environ['GYP_GENERATORS'] = 'ninja'

  vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
| bsd-3-clause |
mhlechner/psi4 | psi4/driver/qcdb/periodictable.py | 2 | 78237 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Elemental masses (most common isotope), symbols, and atomic numbers from psi4.
"""
# Element names (upper-case) indexed by atomic number; index 0 is a ghost-atom
# placeholder.  Fix: the original was missing the comma between "LAWRENCIUM"
# and "RUTHERFORDIUM", so implicit string concatenation produced the bogus
# entry "LAWRENCIUMRUTHERFORDIUM" and shifted Db/Sg/Bh down by one.  With the
# comma restored the list has 108 entries (Z = 0..107), matching _temp_z.
_temp_element = ["GHOST", "HYDROGEN", "HELIUM", "LITHIUM", "BERYLLIUM",
                 "BORON", "CARBON", "NITROGEN", "OXYGEN", "FLUORINE",
                 "NEON", "SODIUM", "MAGNESIUM", "ALUMINUM", "SILICON",
                 "PHOSPHORUS", "SULFUR", "CHLORINE", "ARGON", "POTASSIUM",
                 "CALCIUM", "SCANDIUM", "TITANIUM", "VANADIUM", "CHROMIUM",
                 "MANGANESE", "IRON", "COBALT", "NICKEL", "COPPER",
                 "ZINC", "GALLIUM", "GERMANIUM", "ARSENIC", "SELENIUM",
                 "BROMINE", "KRYPTON", "RUBIDIUM", "STRONTIUM", "YTTRIUM",
                 "ZIRCONIUM", "NIOBIUM", "MOLYBDENUM", "TECHNETIUM", "RUTHENIUM",
                 "RHODIUM", "PALLADIUM", "SILVER", "CADMIUM", "INDIUM",
                 "TIN", "ANTIMONY", "TELLURIUM", "IODINE", "XENON",
                 "CESIUM", "BARIUM", "LANTHANUM", "CERIUM", "PRASEODYMIUM",
                 "NEODYMIUM", "PROMETHIUM", "SAMARIUM", "EUROPIUM", "GADOLINIUM",
                 "TERBIUM", "DYSPROSIUM", "HOLMIUM", "ERBIUM", "THULIUM",
                 "YTTERBIUM", "LUTETIUM", "HAFNIUM", "TANTALUM", "TUNGSTEN",
                 "RHENIUM", "OSMIUM", "IRIDIUM", "PLATINUM", "GOLD",
                 "MERCURY", "THALLIUM", "LEAD", "BISMUTH", "POLONIUM",
                 "ASTATINE", "RADON", "FRANCIUM", "RADIUM", "ACTINIUM",
                 "THORIUM", "PROTACTINIUM", "URANIUM", "NEPTUNIUM", "PLUTONIUM",
                 "AMERICIUM", "CURIUM", "BERKELIUM", "CALIFORNIUM", "EINSTEINIUM",
                 "FERMIUM", "MENDELEVIUM", "NOBELIUM", "LAWRENCIUM", "RUTHERFORDIUM",
                 "DUBNIUM", "SEABORGIUM", "BOHRIUM"]
# Atomic symbols (upper-case) indexed by atomic number; index 0 ("X") is a
# ghost atom.  NOTE(review): this list extends further than _temp_z/_temp_mass
# (through UUO) — verify lengths are intended to differ.
_temp_symbol = ["X", "H", "HE", "LI", "BE", "B", "C", "N", "O", "F", "NE", "NA", "MG",
"AL", "SI", "P", "S", "CL", "AR", "K", "CA", "SC", "TI", "V", "CR", "MN", "FE", "CO",
"NI", "CU", "ZN", "GA", "GE", "AS", "SE", "BR", "KR", "RB", "SR", "Y", "ZR", "NB",
"MO", "TC", "RU", "RH", "PD", "AG", "CD", "IN", "SN", "SB", "TE", "I", "XE", "CS",
"BA", "LA", "CE", "PR", "ND", "PM", "SM", "EU", "GD", "TB", "DY", "HO", "ER", "TM",
"YB", "LU", "HF", "TA", "W", "RE", "OS", "IR", "PT", "AU", "HG", "TL", "PB", "BI",
"PO", "AT", "RN", "FR", "RA", "AC", "TH", "PA", "U", "NP", "PU", "AM", "CM", "BK",
"CF", "ES", "FM", "MD", "NO", "LR", "RF", "DB", "SG", "BH", "HS", "MT", "DS", "RG",
"UUB", "UUT", "UUQ", "UUP", "UUH", "UUS", "UUO"]
# Atomic numbers 0-107, parallel to the element-name list above.
_temp_z = list(range(108))
# Masses (in u) of the most common isotope, indexed by atomic number
# (per the module docstring); index 0 is the ghost atom (mass 0).
_temp_mass = [
    0., 1.00782503207, 4.00260325415, 7.016004548, 9.012182201, 11.009305406,
    12, 14.00307400478, 15.99491461956, 18.998403224, 19.99244017542,
    22.98976928087, 23.985041699, 26.981538627, 27.97692653246, 30.973761629,
    31.972070999, 34.968852682, 39.96238312251, 38.963706679, 39.962590983,
    44.955911909, 47.947946281, 50.943959507, 51.940507472, 54.938045141,
    55.934937475, 58.933195048, 57.935342907, 62.929597474, 63.929142222,
    68.925573587, 73.921177767, 74.921596478, 79.916521271, 78.918337087,
    85.910610729, 84.911789737, 87.905612124, 88.905848295, 89.904704416,
    92.906378058, 97.905408169, 98.906254747, 101.904349312, 102.905504292,
    105.903485715, 106.90509682, 113.90335854, 114.903878484, 119.902194676,
    120.903815686, 129.906224399, 126.904472681, 131.904153457, 132.905451932,
    137.905247237, 138.906353267, 139.905438706, 140.907652769, 141.907723297,
    144.912749023, 151.919732425, 152.921230339, 157.924103912, 158.925346757,
    163.929174751, 164.93032207, 165.930293061, 168.93421325, 173.938862089,
    174.940771819, 179.946549953, 180.947995763, 183.950931188, 186.955753109,
    191.96148069, 192.96292643, 194.964791134, 196.966568662, 201.970643011,
    204.974427541, 207.976652071, 208.980398734, 208.982430435, 210.987496271,
    222.017577738, 222.01755173, 228.031070292, 227.027752127, 232.038055325,
    231.03588399, 238.050788247, 237.048173444, 242.058742611, 243.06138108,
    247.07035354, 247.07030708, 251.079586788, 252.082978512, 257.095104724,
    258.098431319, 255.093241131, 260.105504, 263.112547, 255.107398, 259.114500,
    262.122892, 263.128558, 265.136151, 281.162061, 272.153615, 283.171792, 283.176451,
    285.183698, 287.191186, 292.199786, 291.206564, 293.214670]
_temp_iso_symbol = [
"H", "H1", "H2", "D", "H3", "T", "H4", "H5", "H6", "H7", "HE", "HE3", "HE4",
"HE5", "HE6", "HE7", "HE8", "HE9", "HE10", "LI", "LI3", "LI4", "LI5", "LI6",
"LI7", "LI8", "LI9", "LI10", "LI11", "LI12", "BE", "BE5", "BE6", "BE7", "BE8",
"BE9", "BE10", "BE11", "BE12", "BE13", "BE14", "BE15", "BE16", "B", "B6", "B7",
"B8", "B9", "B10", "B11", "B12", "B13", "B14", "B15", "B16", "B17", "B18", "B19",
"C", "C8", "C9", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18",
"C19", "C20", "C21", "C22", "N", "N10", "N11", "N12", "N13", "N14", "N15", "N16",
"N17", "N18", "N19", "N20", "N21", "N22", "N23", "N24", "N25", "O", "O12", "O13",
"O14", "O15", "O16", "O17", "O18", "O19", "O20", "O21", "O22", "O23", "O24",
"O25", "O26", "O27", "O28", "F", "F14", "F15", "F16", "F17", "F18", "F19", "F20",
"F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31",
"NE", "NE16", "NE17", "NE18", "NE19", "NE20", "NE21", "NE22", "NE23", "NE24",
"NE25", "NE26", "NE27", "NE28", "NE29", "NE30", "NE31", "NE32", "NE33", "NE34",
"NA", "NA18", "NA19", "NA20", "NA21", "NA22", "NA23", "NA24", "NA25", "NA26",
"NA27", "NA28", "NA29", "NA30", "NA31", "NA32", "NA33", "NA34", "NA35", "NA36",
"NA37", "MG", "MG19", "MG20", "MG21", "MG22", "MG23", "MG24", "MG25", "MG26",
"MG27", "MG28", "MG29", "MG30", "MG31", "MG32", "MG33", "MG34", "MG35", "MG36",
"MG37", "MG38", "MG39", "MG40", "AL", "AL21", "AL22", "AL23", "AL24", "AL25",
"AL26", "AL27", "AL28", "AL29", "AL30", "AL31", "AL32", "AL33", "AL34", "AL35",
"AL36", "AL37", "AL38", "AL39", "AL40", "AL41", "AL42", "SI", "SI22", "SI23",
"SI24", "SI25", "SI26", "SI27", "SI28", "SI29", "SI30", "SI31", "SI32", "SI33",
"SI34", "SI35", "SI36", "SI37", "SI38", "SI39", "SI40", "SI41", "SI42", "SI43",
"SI44", "P", "P24", "P25", "P26", "P27", "P28", "P29", "P30", "P31", "P32",
"P33", "P34", "P35", "P36", "P37", "P38", "P39", "P40", "P41", "P42", "P43",
"P44", "P45", "P46", "S", "S26", "S27", "S28", "S29", "S30", "S31", "S32", "S33",
"S34", "S35", "S36", "S37", "S38", "S39", "S40", "S41", "S42", "S43", "S44",
"S45", "S46", "S47", "S48", "S49", "CL", "CL28", "CL29", "CL30", "CL31", "CL32",
"CL33", "CL34", "CL35", "CL36", "CL37", "CL38", "CL39", "CL40", "CL41", "CL42",
"CL43", "CL44", "CL45", "CL46", "CL47", "CL48", "CL49", "CL50", "CL51", "AR",
"AR30", "AR31", "AR32", "AR33", "AR34", "AR35", "AR36", "AR37", "AR38", "AR39",
"AR40", "AR41", "AR42", "AR43", "AR44", "AR45", "AR46", "AR47", "AR48", "AR49",
"AR50", "AR51", "AR52", "AR53", "K", "K32", "K33", "K34", "K35", "K36", "K37",
"K38", "K39", "K40", "K41", "K42", "K43", "K44", "K45", "K46", "K47", "K48",
"K49", "K50", "K51", "K52", "K53", "K54", "K55", "CA", "CA34", "CA35", "CA36",
"CA37", "CA38", "CA39", "CA40", "CA41", "CA42", "CA43", "CA44", "CA45", "CA46",
"CA47", "CA48", "CA49", "CA50", "CA51", "CA52", "CA53", "CA54", "CA55", "CA56",
"CA57", "SC", "SC36", "SC37", "SC38", "SC39", "SC40", "SC41", "SC42", "SC43",
"SC44", "SC45", "SC46", "SC47", "SC48", "SC49", "SC50", "SC51", "SC52", "SC53",
"SC54", "SC55", "SC56", "SC57", "SC58", "SC59", "SC60", "TI", "TI38", "TI39",
"TI40", "TI41", "TI42", "TI43", "TI44", "TI45", "TI46", "TI47", "TI48", "TI49",
"TI50", "TI51", "TI52", "TI53", "TI54", "TI55", "TI56", "TI57", "TI58", "TI59",
"TI60", "TI61", "TI62", "TI63", "V", "V40", "V41", "V42", "V43", "V44", "V45",
"V46", "V47", "V48", "V49", "V50", "V51", "V52", "V53", "V54", "V55", "V56",
"V57", "V58", "V59", "V60", "V61", "V62", "V63", "V64", "V65", "CR", "CR42",
"CR43", "CR44", "CR45", "CR46", "CR47", "CR48", "CR49", "CR50", "CR51", "CR52",
"CR53", "CR54", "CR55", "CR56", "CR57", "CR58", "CR59", "CR60", "CR61", "CR62",
"CR63", "CR64", "CR65", "CR66", "CR67", "MN", "MN44", "MN45", "MN46", "MN47",
"MN48", "MN49", "MN50", "MN51", "MN52", "MN53", "MN54", "MN55", "MN56", "MN57",
"MN58", "MN59", "MN60", "MN61", "MN62", "MN63", "MN64", "MN65", "MN66", "MN67",
"MN68", "MN69", "FE", "FE45", "FE46", "FE47", "FE48", "FE49", "FE50", "FE51",
"FE52", "FE53", "FE54", "FE55", "FE56", "FE57", "FE58", "FE59", "FE60", "FE61",
"FE62", "FE63", "FE64", "FE65", "FE66", "FE67", "FE68", "FE69", "FE70", "FE71",
"FE72", "CO", "CO47", "CO48", "CO49", "CO50", "CO51", "CO52", "CO53", "CO54",
"CO55", "CO56", "CO57", "CO58", "CO59", "CO60", "CO61", "CO62", "CO63", "CO64",
"CO65", "CO66", "CO67", "CO68", "CO69", "CO70", "CO71", "CO72", "CO73", "CO74",
"CO75", "NI", "NI48", "NI49", "NI50", "NI51", "NI52", "NI53", "NI54", "NI55",
"NI56", "NI57", "NI58", "NI59", "NI60", "NI61", "NI62", "NI63", "NI64", "NI65",
"NI66", "NI67", "NI68", "NI69", "NI70", "NI71", "NI72", "NI73", "NI74", "NI75",
"NI76", "NI77", "NI78", "CU", "CU52", "CU53", "CU54", "CU55", "CU56", "CU57",
"CU58", "CU59", "CU60", "CU61", "CU62", "CU63", "CU64", "CU65", "CU66", "CU67",
"CU68", "CU69", "CU70", "CU71", "CU72", "CU73", "CU74", "CU75", "CU76", "CU77",
"CU78", "CU79", "CU80", "ZN", "ZN54", "ZN55", "ZN56", "ZN57", "ZN58", "ZN59",
"ZN60", "ZN61", "ZN62", "ZN63", "ZN64", "ZN65", "ZN66", "ZN67", "ZN68", "ZN69",
"ZN70", "ZN71", "ZN72", "ZN73", "ZN74", "ZN75", "ZN76", "ZN77", "ZN78", "ZN79",
"ZN80", "ZN81", "ZN82", "ZN83", "GA", "GA56", "GA57", "GA58", "GA59", "GA60",
"GA61", "GA62", "GA63", "GA64", "GA65", "GA66", "GA67", "GA68", "GA69", "GA70",
"GA71", "GA72", "GA73", "GA74", "GA75", "GA76", "GA77", "GA78", "GA79", "GA80",
"GA81", "GA82", "GA83", "GA84", "GA85", "GA86", "GE", "GE58", "GE59", "GE60",
"GE61", "GE62", "GE63", "GE64", "GE65", "GE66", "GE67", "GE68", "GE69", "GE70",
"GE71", "GE72", "GE73", "GE74", "GE75", "GE76", "GE77", "GE78", "GE79", "GE80",
"GE81", "GE82", "GE83", "GE84", "GE85", "GE86", "GE87", "GE88", "GE89", "AS",
"AS60", "AS61", "AS62", "AS63", "AS64", "AS65", "AS66", "AS67", "AS68", "AS69",
"AS70", "AS71", "AS72", "AS73", "AS74", "AS75", "AS76", "AS77", "AS78", "AS79",
"AS80", "AS81", "AS82", "AS83", "AS84", "AS85", "AS86", "AS87", "AS88", "AS89",
"AS90", "AS91", "AS92", "SE", "SE65", "SE66", "SE67", "SE68", "SE69", "SE70",
"SE71", "SE72", "SE73", "SE74", "SE75", "SE76", "SE77", "SE78", "SE79", "SE80",
"SE81", "SE82", "SE83", "SE84", "SE85", "SE86", "SE87", "SE88", "SE89", "SE90",
"SE91", "SE92", "SE93", "SE94", "BR", "BR67", "BR68", "BR69", "BR70", "BR71",
"BR72", "BR73", "BR74", "BR75", "BR76", "BR77", "BR78", "BR79", "BR80", "BR81",
"BR82", "BR83", "BR84", "BR85", "BR86", "BR87", "BR88", "BR89", "BR90", "BR91",
"BR92", "BR93", "BR94", "BR95", "BR96", "BR97", "KR", "KR69", "KR70", "KR71",
"KR72", "KR73", "KR74", "KR75", "KR76", "KR77", "KR78", "KR79", "KR80", "KR81",
"KR82", "KR83", "KR84", "KR85", "KR86", "KR87", "KR88", "KR89", "KR90", "KR91",
"KR92", "KR93", "KR94", "KR95", "KR96", "KR97", "KR98", "KR99", "KR100", "RB",
"RB71", "RB72", "RB73", "RB74", "RB75", "RB76", "RB77", "RB78", "RB79", "RB80",
"RB81", "RB82", "RB83", "RB84", "RB85", "RB86", "RB87", "RB88", "RB89", "RB90",
"RB91", "RB92", "RB93", "RB94", "RB95", "RB96", "RB97", "RB98", "RB99",
"RB100", "RB101", "RB102", "SR", "SR73", "SR74", "SR75", "SR76", "SR77",
"SR78", "SR79", "SR80", "SR81", "SR82", "SR83", "SR84", "SR85", "SR86", "SR87",
"SR88", "SR89", "SR90", "SR91", "SR92", "SR93", "SR94", "SR95", "SR96", "SR97",
"SR98", "SR99", "SR100", "SR101", "SR102", "SR103", "SR104", "SR105", "Y",
"Y76", "Y77", "Y78", "Y79", "Y80", "Y81", "Y82", "Y83", "Y84", "Y85", "Y86",
"Y87", "Y88", "Y89", "Y90", "Y91", "Y92", "Y93", "Y94", "Y95", "Y96", "Y97",
"Y98", "Y99", "Y100", "Y101", "Y102", "Y103", "Y104", "Y105", "Y106", "Y107",
"Y108", "ZR", "ZR78", "ZR79", "ZR80", "ZR81", "ZR82", "ZR83", "ZR84", "ZR85",
"ZR86", "ZR87", "ZR88", "ZR89", "ZR90", "ZR91", "ZR92", "ZR93", "ZR94", "ZR95",
"ZR96", "ZR97", "ZR98", "ZR99", "ZR100", "ZR101", "ZR102", "ZR103", "ZR104",
"ZR105", "ZR106", "ZR107", "ZR108", "ZR109", "ZR110", "NB", "NB81", "NB82",
"NB83", "NB84", "NB85", "NB86", "NB87", "NB88", "NB89", "NB90", "NB91", "NB92",
"NB93", "NB94", "NB95", "NB96", "NB97", "NB98", "NB99", "NB100", "NB101",
"NB102", "NB103", "NB104", "NB105", "NB106", "NB107", "NB108", "NB109",
"NB110", "NB111", "NB112", "NB113", "MO", "MO83", "MO84", "MO85", "MO86",
"MO87", "MO88", "MO89", "MO90", "MO91", "MO92", "MO93", "MO94", "MO95", "MO96",
"MO97", "MO98", "MO99", "MO100", "MO101", "MO102", "MO103", "MO104", "MO105",
"MO106", "MO107", "MO108", "MO109", "MO110", "MO111", "MO112", "MO113",
"MO114", "MO115", "TC", "TC85", "TC86", "TC87", "TC88", "TC89", "TC90", "TC91",
"TC92", "TC93", "TC94", "TC95", "TC96", "TC97", "TC98", "TC99", "TC100",
"TC101", "TC102", "TC103", "TC104", "TC105", "TC106", "TC107", "TC108",
"TC109", "TC110", "TC111", "TC112", "TC113", "TC114", "TC115", "TC116",
"TC117", "TC118", "RU", "RU87", "RU88", "RU89", "RU90", "RU91", "RU92", "RU93",
"RU94", "RU95", "RU96", "RU97", "RU98", "RU99", "RU100", "RU101", "RU102",
"RU103", "RU104", "RU105", "RU106", "RU107", "RU108", "RU109", "RU110",
"RU111", "RU112", "RU113", "RU114", "RU115", "RU116", "RU117", "RU118",
"RU119", "RU120", "RH", "RH89", "RH90", "RH91", "RH92", "RH93", "RH94", "RH95",
"RH96", "RH97", "RH98", "RH99", "RH100", "RH101", "RH102", "RH103", "RH104",
"RH105", "RH106", "RH107", "RH108", "RH109", "RH110", "RH111", "RH112",
"RH113", "RH114", "RH115", "RH116", "RH117", "RH118", "RH119", "RH120",
"RH121", "RH122", "PD", "PD91", "PD92", "PD93", "PD94", "PD95", "PD96", "PD97",
"PD98", "PD99", "PD100", "PD101", "PD102", "PD103", "PD104", "PD105", "PD106",
"PD107", "PD108", "PD109", "PD110", "PD111", "PD112", "PD113", "PD114",
"PD115", "PD116", "PD117", "PD118", "PD119", "PD120", "PD121", "PD122",
"PD123", "PD124", "AG", "AG93", "AG94", "AG95", "AG96", "AG97", "AG98", "AG99",
"AG100", "AG101", "AG102", "AG103", "AG104", "AG105", "AG106", "AG107",
"AG108", "AG109", "AG110", "AG111", "AG112", "AG113", "AG114", "AG115",
"AG116", "AG117", "AG118", "AG119", "AG120", "AG121", "AG122", "AG123",
"AG124", "AG125", "AG126", "AG127", "AG128", "AG129", "AG130", "CD", "CD95",
"CD96", "CD97", "CD98", "CD99", "CD100", "CD101", "CD102", "CD103", "CD104",
"CD105", "CD106", "CD107", "CD108", "CD109", "CD110", "CD111", "CD112",
"CD113", "CD114", "CD115", "CD116", "CD117", "CD118", "CD119", "CD120",
"CD121", "CD122", "CD123", "CD124", "CD125", "CD126", "CD127", "CD128",
"CD129", "CD130", "CD131", "CD132", "IN", "IN97", "IN98", "IN99", "IN100",
"IN101", "IN102", "IN103", "IN104", "IN105", "IN106", "IN107", "IN108",
"IN109", "IN110", "IN111", "IN112", "IN113", "IN114", "IN115", "IN116",
"IN117", "IN118", "IN119", "IN120", "IN121", "IN122", "IN123", "IN124",
"IN125", "IN126", "IN127", "IN128", "IN129", "IN130", "IN131", "IN132",
"IN133", "IN134", "IN135", "SN", "SN99", "SN100", "SN101", "SN102", "SN103",
"SN104", "SN105", "SN106", "SN107", "SN108", "SN109", "SN110", "SN111",
"SN112", "SN113", "SN114", "SN115", "SN116", "SN117", "SN118", "SN119",
"SN120", "SN121", "SN122", "SN123", "SN124", "SN125", "SN126", "SN127",
"SN128", "SN129", "SN130", "SN131", "SN132", "SN133", "SN134", "SN135",
"SN136", "SN137", "SB", "SB103", "SB104", "SB105", "SB106", "SB107", "SB108",
"SB109", "SB110", "SB111", "SB112", "SB113", "SB114", "SB115", "SB116",
"SB117", "SB118", "SB119", "SB120", "SB121", "SB122", "SB123", "SB124",
"SB125", "SB126", "SB127", "SB128", "SB129", "SB130", "SB131", "SB132",
"SB133", "SB134", "SB135", "SB136", "SB137", "SB138", "SB139", "TE", "TE105",
"TE106", "TE107", "TE108", "TE109", "TE110", "TE111", "TE112", "TE113",
"TE114", "TE115", "TE116", "TE117", "TE118", "TE119", "TE120", "TE121",
"TE122", "TE123", "TE124", "TE125", "TE126", "TE127", "TE128", "TE129",
"TE130", "TE131", "TE132", "TE133", "TE134", "TE135", "TE136", "TE137",
"TE138", "TE139", "TE140", "TE141", "TE142", "I", "I108", "I109", "I110",
"I111", "I112", "I113", "I114", "I115", "I116", "I117", "I118", "I119", "I120",
"I121", "I122", "I123", "I124", "I125", "I126", "I127", "I128", "I129", "I130",
"I131", "I132", "I133", "I134", "I135", "I136", "I137", "I138", "I139", "I140",
"I141", "I142", "I143", "I144", "XE", "XE110", "XE111", "XE112", "XE113",
"XE114", "XE115", "XE116", "XE117", "XE118", "XE119", "XE120", "XE121",
"XE122", "XE123", "XE124", "XE125", "XE126", "XE127", "XE128", "XE129",
"XE130", "XE131", "XE132", "XE133", "XE134", "XE135", "XE136", "XE137",
"XE138", "XE139", "XE140", "XE141", "XE142", "XE143", "XE144", "XE145",
"XE146", "XE147", "CS", "CS112", "CS113", "CS114", "CS115", "CS116", "CS117",
"CS118", "CS119", "CS120", "CS121", "CS122", "CS123", "CS124", "CS125",
"CS126", "CS127", "CS128", "CS129", "CS130", "CS131", "CS132", "CS133",
"CS134", "CS135", "CS136", "CS137", "CS138", "CS139", "CS140", "CS141",
"CS142", "CS143", "CS144", "CS145", "CS146", "CS147", "CS148", "CS149",
"CS150", "CS151", "BA", "BA114", "BA115", "BA116", "BA117", "BA118", "BA119",
"BA120", "BA121", "BA122", "BA123", "BA124", "BA125", "BA126", "BA127",
"BA128", "BA129", "BA130", "BA131", "BA132", "BA133", "BA134", "BA135",
"BA136", "BA137", "BA138", "BA139", "BA140", "BA141", "BA142", "BA143",
"BA144", "BA145", "BA146", "BA147", "BA148", "BA149", "BA150", "BA151",
"BA152", "BA153", "LA", "LA117", "LA118", "LA119", "LA120", "LA121", "LA122",
"LA123", "LA124", "LA125", "LA126", "LA127", "LA128", "LA129", "LA130",
"LA131", "LA132", "LA133", "LA134", "LA135", "LA136", "LA137", "LA138",
"LA139", "LA140", "LA141", "LA142", "LA143", "LA144", "LA145", "LA146",
"LA147", "LA148", "LA149", "LA150", "LA151", "LA152", "LA153", "LA154",
"LA155", "CE", "CE119", "CE120", "CE121", "CE122", "CE123", "CE124", "CE125",
"CE126", "CE127", "CE128", "CE129", "CE130", "CE131", "CE132", "CE133",
"CE134", "CE135", "CE136", "CE137", "CE138", "CE139", "CE140", "CE141",
"CE142", "CE143", "CE144", "CE145", "CE146", "CE147", "CE148", "CE149",
"CE150", "CE151", "CE152", "CE153", "CE154", "CE155", "CE156", "CE157", "PR",
"PR121", "PR122", "PR123", "PR124", "PR125", "PR126", "PR127", "PR128",
"PR129", "PR130", "PR131", "PR132", "PR133", "PR134", "PR135", "PR136",
"PR137", "PR138", "PR139", "PR140", "PR141", "PR142", "PR143", "PR144",
"PR145", "PR146", "PR147", "PR148", "PR149", "PR150", "PR151", "PR152",
"PR153", "PR154", "PR155", "PR156", "PR157", "PR158", "PR159", "ND", "ND124",
"ND125", "ND126", "ND127", "ND128", "ND129", "ND130", "ND131", "ND132",
"ND133", "ND134", "ND135", "ND136", "ND137", "ND138", "ND139", "ND140",
"ND141", "ND142", "ND143", "ND144", "ND145", "ND146", "ND147", "ND148",
"ND149", "ND150", "ND151", "ND152", "ND153", "ND154", "ND155", "ND156",
"ND157", "ND158", "ND159", "ND160", "ND161", "PM", "PM126", "PM127", "PM128",
"PM129", "PM130", "PM131", "PM132", "PM133", "PM134", "PM135", "PM136",
"PM137", "PM138", "PM139", "PM140", "PM141", "PM142", "PM143", "PM144",
"PM145", "PM146", "PM147", "PM148", "PM149", "PM150", "PM151", "PM152",
"PM153", "PM154", "PM155", "PM156", "PM157", "PM158", "PM159", "PM160",
"PM161", "PM162", "PM163", "SM", "SM128", "SM129", "SM130", "SM131", "SM132",
"SM133", "SM134", "SM135", "SM136", "SM137", "SM138", "SM139", "SM140",
"SM141", "SM142", "SM143", "SM144", "SM145", "SM146", "SM147", "SM148",
"SM149", "SM150", "SM151", "SM152", "SM153", "SM154", "SM155", "SM156",
"SM157", "SM158", "SM159", "SM160", "SM161", "SM162", "SM163", "SM164",
"SM165", "EU", "EU130", "EU131", "EU132", "EU133", "EU134", "EU135", "EU136",
"EU137", "EU138", "EU139", "EU140", "EU141", "EU142", "EU143", "EU144",
"EU145", "EU146", "EU147", "EU148", "EU149", "EU150", "EU151", "EU152",
"EU153", "EU154", "EU155", "EU156", "EU157", "EU158", "EU159", "EU160",
"EU161", "EU162", "EU163", "EU164", "EU165", "EU166", "EU167", "GD", "GD134",
"GD135", "GD136", "GD137", "GD138", "GD139", "GD140", "GD141", "GD142",
"GD143", "GD144", "GD145", "GD146", "GD147", "GD148", "GD149", "GD150",
"GD151", "GD152", "GD153", "GD154", "GD155", "GD156", "GD157", "GD158",
"GD159", "GD160", "GD161", "GD162", "GD163", "GD164", "GD165", "GD166",
"GD167", "GD168", "GD169", "TB", "TB136", "TB137", "TB138", "TB139", "TB140",
"TB141", "TB142", "TB143", "TB144", "TB145", "TB146", "TB147", "TB148",
"TB149", "TB150", "TB151", "TB152", "TB153", "TB154", "TB155", "TB156",
"TB157", "TB158", "TB159", "TB160", "TB161", "TB162", "TB163", "TB164",
"TB165", "TB166", "TB167", "TB168", "TB169", "TB170", "TB171", "DY", "DY138",
"DY139", "DY140", "DY141", "DY142", "DY143", "DY144", "DY145", "DY146",
"DY147", "DY148", "DY149", "DY150", "DY151", "DY152", "DY153", "DY154",
"DY155", "DY156", "DY157", "DY158", "DY159", "DY160", "DY161", "DY162",
"DY163", "DY164", "DY165", "DY166", "DY167", "DY168", "DY169", "DY170",
"DY171", "DY172", "DY173", "HO", "HO140", "HO141", "HO142", "HO143", "HO144",
"HO145", "HO146", "HO147", "HO148", "HO149", "HO150", "HO151", "HO152",
"HO153", "HO154", "HO155", "HO156", "HO157", "HO158", "HO159", "HO160",
"HO161", "HO162", "HO163", "HO164", "HO165", "HO166", "HO167", "HO168",
"HO169", "HO170", "HO171", "HO172", "HO173", "HO174", "HO175", "ER", "ER143",
"ER144", "ER145", "ER146", "ER147", "ER148", "ER149", "ER150", "ER151",
"ER152", "ER153", "ER154", "ER155", "ER156", "ER157", "ER158", "ER159",
"ER160", "ER161", "ER162", "ER163", "ER164", "ER165", "ER166", "ER167",
"ER168", "ER169", "ER170", "ER171", "ER172", "ER173", "ER174", "ER175",
"ER176", "ER177", "TM", "TM145", "TM146", "TM147", "TM148", "TM149", "TM150",
"TM151", "TM152", "TM153", "TM154", "TM155", "TM156", "TM157", "TM158",
"TM159", "TM160", "TM161", "TM162", "TM163", "TM164", "TM165", "TM166",
"TM167", "TM168", "TM169", "TM170", "TM171", "TM172", "TM173", "TM174",
"TM175", "TM176", "TM177", "TM178", "TM179", "YB", "YB148", "YB149", "YB150",
"YB151", "YB152", "YB153", "YB154", "YB155", "YB156", "YB157", "YB158",
"YB159", "YB160", "YB161", "YB162", "YB163", "YB164", "YB165", "YB166",
"YB167", "YB168", "YB169", "YB170", "YB171", "YB172", "YB173", "YB174",
"YB175", "YB176", "YB177", "YB178", "YB179", "YB180", "YB181", "LU", "LU150",
"LU151", "LU152", "LU153", "LU154", "LU155", "LU156", "LU157", "LU158",
"LU159", "LU160", "LU161", "LU162", "LU163", "LU164", "LU165", "LU166",
"LU167", "LU168", "LU169", "LU170", "LU171", "LU172", "LU173", "LU174",
"LU175", "LU176", "LU177", "LU178", "LU179", "LU180", "LU181", "LU182",
"LU183", "LU184", "HF", "HF153", "HF154", "HF155", "HF156", "HF157", "HF158",
"HF159", "HF160", "HF161", "HF162", "HF163", "HF164", "HF165", "HF166",
"HF167", "HF168", "HF169", "HF170", "HF171", "HF172", "HF173", "HF174",
"HF175", "HF176", "HF177", "HF178", "HF179", "HF180", "HF181", "HF182",
"HF183", "HF184", "HF185", "HF186", "HF187", "HF188", "TA", "TA155", "TA156",
"TA157", "TA158", "TA159", "TA160", "TA161", "TA162", "TA163", "TA164",
"TA165", "TA166", "TA167", "TA168", "TA169", "TA170", "TA171", "TA172",
"TA173", "TA174", "TA175", "TA176", "TA177", "TA178", "TA179", "TA180",
"TA181", "TA182", "TA183", "TA184", "TA185", "TA186", "TA187", "TA188",
"TA189", "TA190", "W", "W158", "W159", "W160", "W161", "W162", "W163", "W164",
"W165", "W166", "W167", "W168", "W169", "W170", "W171", "W172", "W173", "W174",
"W175", "W176", "W177", "W178", "W179", "W180", "W181", "W182", "W183", "W184",
"W185", "W186", "W187", "W188", "W189", "W190", "W191", "W192", "RE", "RE160",
"RE161", "RE162", "RE163", "RE164", "RE165", "RE166", "RE167", "RE168",
"RE169", "RE170", "RE171", "RE172", "RE173", "RE174", "RE175", "RE176",
"RE177", "RE178", "RE179", "RE180", "RE181", "RE182", "RE183", "RE184",
"RE185", "RE186", "RE187", "RE188", "RE189", "RE190", "RE191", "RE192",
"RE193", "RE194", "OS", "OS162", "OS163", "OS164", "OS165", "OS166", "OS167",
"OS168", "OS169", "OS170", "OS171", "OS172", "OS173", "OS174", "OS175",
"OS176", "OS177", "OS178", "OS179", "OS180", "OS181", "OS182", "OS183",
"OS184", "OS185", "OS186", "OS187", "OS188", "OS189", "OS190", "OS191",
"OS192", "OS193", "OS194", "OS195", "OS196", "IR", "IR164", "IR165", "IR166",
"IR167", "IR168", "IR169", "IR170", "IR171", "IR172", "IR173", "IR174",
"IR175", "IR176", "IR177", "IR178", "IR179", "IR180", "IR181", "IR182",
"IR183", "IR184", "IR185", "IR186", "IR187", "IR188", "IR189", "IR190",
"IR191", "IR192", "IR193", "IR194", "IR195", "IR196", "IR197", "IR198",
"IR199", "PT", "PT166", "PT167", "PT168", "PT169", "PT170", "PT171", "PT172",
"PT173", "PT174", "PT175", "PT176", "PT177", "PT178", "PT179", "PT180",
"PT181", "PT182", "PT183", "PT184", "PT185", "PT186", "PT187", "PT188",
"PT189", "PT190", "PT191", "PT192", "PT193", "PT194", "PT195", "PT196",
"PT197", "PT198", "PT199", "PT200", "PT201", "PT202", "AU", "AU169", "AU170",
"AU171", "AU172", "AU173", "AU174", "AU175", "AU176", "AU177", "AU178",
"AU179", "AU180", "AU181", "AU182", "AU183", "AU184", "AU185", "AU186",
"AU187", "AU188", "AU189", "AU190", "AU191", "AU192", "AU193", "AU194",
"AU195", "AU196", "AU197", "AU198", "AU199", "AU200", "AU201", "AU202",
"AU203", "AU204", "AU205", "HG", "HG171", "HG172", "HG173", "HG174", "HG175",
"HG176", "HG177", "HG178", "HG179", "HG180", "HG181", "HG182", "HG183",
"HG184", "HG185", "HG186", "HG187", "HG188", "HG189", "HG190", "HG191",
"HG192", "HG193", "HG194", "HG195", "HG196", "HG197", "HG198", "HG199",
"HG200", "HG201", "HG202", "HG203", "HG204", "HG205", "HG206", "HG207",
"HG208", "HG209", "HG210", "TL", "TL176", "TL177", "TL178", "TL179", "TL180",
"TL181", "TL182", "TL183", "TL184", "TL185", "TL186", "TL187", "TL188",
"TL189", "TL190", "TL191", "TL192", "TL193", "TL194", "TL195", "TL196",
"TL197", "TL198", "TL199", "TL200", "TL201", "TL202", "TL203", "TL204",
"TL205", "TL206", "TL207", "TL208", "TL209", "TL210", "TL211", "TL212", "PB",
"PB178", "PB179", "PB180", "PB181", "PB182", "PB183", "PB184", "PB185",
"PB186", "PB187", "PB188", "PB189", "PB190", "PB191", "PB192", "PB193",
"PB194", "PB195", "PB196", "PB197", "PB198", "PB199", "PB200", "PB201",
"PB202", "PB203", "PB204", "PB205", "PB206", "PB207", "PB208", "PB209",
"PB210", "PB211", "PB212", "PB213", "PB214", "PB215", "BI", "BI184", "BI185",
"BI186", "BI187", "BI188", "BI189", "BI190", "BI191", "BI192", "BI193",
"BI194", "BI195", "BI196", "BI197", "BI198", "BI199", "BI200", "BI201",
"BI202", "BI203", "BI204", "BI205", "BI206", "BI207", "BI208", "BI209",
"BI210", "BI211", "BI212", "BI213", "BI214", "BI215", "BI216", "BI217",
"BI218", "PO", "PO188", "PO189", "PO190", "PO191", "PO192", "PO193", "PO194",
"PO195", "PO196", "PO197", "PO198", "PO199", "PO200", "PO201", "PO202",
"PO203", "PO204", "PO205", "PO206", "PO207", "PO208", "PO209", "PO210",
"PO211", "PO212", "PO213", "PO214", "PO215", "PO216", "PO217", "PO218",
"PO219", "PO220", "AT", "AT193", "AT194", "AT195", "AT196", "AT197", "AT198",
"AT199", "AT200", "AT201", "AT202", "AT203", "AT204", "AT205", "AT206",
"AT207", "AT208", "AT209", "AT210", "AT211", "AT212", "AT213", "AT214",
"AT215", "AT216", "AT217", "AT218", "AT219", "AT220", "AT221", "AT222",
"AT223", "RN", "RN195", "RN196", "RN197", "RN198", "RN199", "RN200", "RN201",
"RN202", "RN203", "RN204", "RN205", "RN206", "RN207", "RN208", "RN209",
"RN210", "RN211", "RN212", "RN213", "RN214", "RN215", "RN216", "RN217",
"RN218", "RN219", "RN220", "RN221", "RN222", "RN223", "RN224", "RN225",
"RN226", "RN227", "RN228", "FR", "FR199", "FR200", "FR201", "FR202", "FR203",
"FR204", "FR205", "FR206", "FR207", "FR208", "FR209", "FR210", "FR211",
"FR212", "FR213", "FR214", "FR215", "FR216", "FR217", "FR218", "FR219",
"FR220", "FR221", "FR222", "FR223", "FR224", "FR225", "FR226", "FR227",
"FR228", "FR229", "FR230", "FR231", "FR232", "RA", "RA202", "RA203", "RA204",
"RA205", "RA206", "RA207", "RA208", "RA209", "RA210", "RA211", "RA212",
"RA213", "RA214", "RA215", "RA216", "RA217", "RA218", "RA219", "RA220",
"RA221", "RA222", "RA223", "RA224", "RA225", "RA226", "RA227", "RA228",
"RA229", "RA230", "RA231", "RA232", "RA233", "RA234", "AC", "AC206", "AC207",
"AC208", "AC209", "AC210", "AC211", "AC212", "AC213", "AC214", "AC215",
"AC216", "AC217", "AC218", "AC219", "AC220", "AC221", "AC222", "AC223",
"AC224", "AC225", "AC226", "AC227", "AC228", "AC229", "AC230", "AC231",
"AC232", "AC233", "AC234", "AC235", "AC236", "TH", "TH209", "TH210", "TH211",
"TH212", "TH213", "TH214", "TH215", "TH216", "TH217", "TH218", "TH219",
"TH220", "TH221", "TH222", "TH223", "TH224", "TH225", "TH226", "TH227",
"TH228", "TH229", "TH230", "TH231", "TH232", "TH233", "TH234", "TH235",
"TH236", "TH237", "TH238", "PA", "PA212", "PA213", "PA214", "PA215", "PA216",
"PA217", "PA218", "PA219", "PA220", "PA221", "PA222", "PA223", "PA224",
"PA225", "PA226", "PA227", "PA228", "PA229", "PA230", "PA231", "PA232",
"PA233", "PA234", "PA235", "PA236", "PA237", "PA238", "PA239", "PA240", "U",
"U217", "U218", "U219", "U220", "U221", "U222", "U223", "U224", "U225", "U226",
"U227", "U228", "U229", "U230", "U231", "U232", "U233", "U234", "U235", "U236",
"U237", "U238", "U239", "U240", "U241", "U242", "NP", "NP225", "NP226",
"NP227", "NP228", "NP229", "NP230", "NP231", "NP232", "NP233", "NP234",
"NP235", "NP236", "NP237", "NP238", "NP239", "NP240", "NP241", "NP242",
"NP243", "NP244", "PU", "PU228", "PU229", "PU230", "PU231", "PU232", "PU233",
"PU234", "PU235", "PU236", "PU237", "PU238", "PU239", "PU240", "PU241",
"PU242", "PU243", "PU244", "PU245", "PU246", "PU247", "AM", "AM231", "AM232",
"AM233", "AM234", "AM235", "AM236", "AM237", "AM238", "AM239", "AM240",
"AM241", "AM242", "AM243", "AM244", "AM245", "AM246", "AM247", "AM248",
"AM249", "CM", "CM233", "CM234", "CM235", "CM236", "CM237", "CM238", "CM239",
"CM240", "CM241", "CM242", "CM243", "CM244", "CM245", "CM246", "CM247",
"CM248", "CM249", "CM250", "CM251", "CM252", "BK", "BK235", "BK236", "BK237",
"BK238", "BK239", "BK240", "BK241", "BK242", "BK243", "BK244", "BK245",
"BK246", "BK247", "BK248", "BK249", "BK250", "BK251", "BK252", "BK253",
"BK254", "CF", "CF237", "CF238", "CF239", "CF240", "CF241", "CF242", "CF243",
"CF244", "CF245", "CF246", "CF247", "CF248", "CF249", "CF250", "CF251",
"CF252", "CF253", "CF254", "CF255", "CF256", "ES", "ES240", "ES241", "ES242",
"ES243", "ES244", "ES245", "ES246", "ES247", "ES248", "ES249", "ES250",
"ES251", "ES252", "ES253", "ES254", "ES255", "ES256", "ES257", "ES258", "FM",
"FM242", "FM243", "FM244", "FM245", "FM246", "FM247", "FM248", "FM249",
"FM250", "FM251", "FM252", "FM253", "FM254", "FM255", "FM256", "FM257",
"FM258", "FM259", "FM260", "MD", "MD245", "MD246", "MD247", "MD248", "MD249",
"MD250", "MD251", "MD252", "MD253", "MD254", "MD255", "MD256", "MD257",
"MD258", "MD259", "MD260", "MD261", "MD262", "NO", "NO248", "NO249", "NO250",
"NO251", "NO252", "NO253", "NO254", "NO255", "NO256", "NO257", "NO258",
"NO259", "NO260", "NO261", "NO262", "NO263", "NO264", "LR", "LR251", "LR252",
"LR253", "LR254", "LR255", "LR256", "LR257", "LR258", "LR259", "LR260",
"LR261", "LR262", "LR263", "LR264", "LR265", "LR266", "RF", "RF253", "RF254",
"RF255", "RF256", "RF257", "RF258", "RF259", "RF260", "RF261", "RF262",
"RF263", "RF264", "RF265", "RF266", "RF267", "RF268", "DB", "DB255", "DB256",
"DB257", "DB258", "DB259", "DB260", "DB261", "DB262", "DB263", "DB264",
"DB265", "DB266", "DB267", "DB268", "DB269", "DB270", "SG", "SG258", "SG259",
"SG260", "SG261", "SG262", "SG263", "SG264", "SG265", "SG266", "SG267",
"SG268", "SG269", "SG270", "SG271", "SG272", "SG273", "BH", "BH260", "BH261",
"BH262", "BH263", "BH264", "BH265", "BH266", "BH267", "BH268", "BH269",
"BH270", "BH271", "BH272", "BH273", "BH274", "BH275", "HS", "HS263", "HS264",
"HS265", "HS266", "HS267", "HS268", "HS269", "HS270", "HS271", "HS272",
"HS273", "HS274", "HS275", "HS276", "HS277", "MT", "MT265", "MT266", "MT267",
"MT268", "MT269", "MT270", "MT271", "MT272", "MT273", "MT274", "MT275",
"MT276", "MT277", "MT278", "MT279", "DS", "DS267", "DS268", "DS269", "DS270",
"DS271", "DS272", "DS273", "DS274", "DS275", "DS276", "DS277", "DS278",
"DS279", "DS280", "DS281", "RG", "RG272", "RG273", "RG274", "RG275", "RG276",
"RG277", "RG278", "RG279", "RG280", "RG281", "RG282", "RG283", "UUB",
"UUB277", "UUB278", "UUB279", "UUB280", "UUB281", "UUB282", "UUB283",
"UUB284", "UUB285", "UUT", "UUT283", "UUT284", "UUT285", "UUT286", "UUT287",
"UUQ", "UUQ285", "UUQ286", "UUQ287", "UUQ288", "UUQ289", "UUP", "UUP287",
"UUP288", "UUP289", "UUP290", "UUP291", "UUH", "UUH289", "UUH290", "UUH291",
"UUH292", "UUS", "UUS291", "UUS292", "UUO", "UUO293"]
_temp_iso_mass = [
1.00782503207, 1.00782503207, 2.01410177785, 2.01410177785, 3.01604927767,
3.01604927767, 4.027806424, 5.035311488, 6.044942594, 7.052749,
4.00260325415, 3.01602931914, 4.00260325415, 5.012223624, 6.018889124,
7.028020618, 8.033921897, 9.043950286, 10.052398837, 7.016004548, 3.030775,
4.027185558, 5.0125378, 6.015122794, 7.016004548, 8.022487362, 9.026789505,
10.035481259, 11.043797715, 12.053780, 9.012182201, 5.040790, 6.019726317,
7.016929828, 8.005305103, 9.012182201, 10.013533818, 11.021657749,
12.026920737, 13.035693007, 14.04289292, 15.053460, 16.061920, 11.009305406,
6.046810, 7.029917901, 8.024607233, 9.013328782, 10.012936992, 11.009305406,
12.014352104, 13.017780217, 14.025404009, 15.031103021, 16.039808829,
17.046989906, 18.056170, 19.063730, 12, 8.037675025, 9.031036689,
10.016853228, 11.011433613, 12, 13.00335483778, 14.0032419887, 15.010599256,
16.014701252, 17.022586116, 18.026759354, 19.034805018, 20.040319754,
21.049340, 22.057200, 14.00307400478, 10.041653674, 11.026090956,
12.018613197, 13.005738609, 14.00307400478, 15.00010889823, 16.006101658,
17.008450261, 18.014078959, 19.017028697, 20.023365807, 21.02710824,
22.034394934, 23.041220, 24.051040, 25.060660, 15.99491461956,
12.034404895, 13.024812213, 14.00859625, 15.003065617, 15.99491461956,
16.999131703, 17.999161001, 19.00358013, 20.004076742, 21.008655886,
22.009966947, 23.015687659, 24.020472917, 25.029460, 26.038340, 27.048260,
28.057810, 18.998403224, 14.035060, 15.018009103, 16.011465724,
17.002095237, 18.000937956, 18.998403224, 19.999981315, 20.999948951,
22.002998815, 23.003574631, 24.008115485, 25.012101747, 26.019615555,
27.026760086, 28.035670, 29.043260, 30.052500, 31.060429, 19.99244017542,
16.025761262, 17.017671504, 18.005708213, 19.001880248, 19.99244017542,
20.993846684, 21.991385113, 22.994466904, 23.993610779, 24.997736888,
26.000461206, 27.007589903, 28.012071575, 29.019385933, 30.024801045,
31.033110, 32.040020, 33.049380, 34.057028, 22.98976928087, 18.025969,
19.013877499, 20.007351328, 20.997655206, 21.994436425, 22.98976928087,
23.990962782, 24.989953968, 25.992633, 26.994076788, 27.998938, 29.002861,
30.008976, 31.013585452, 32.02046656, 33.026719756, 34.035170, 35.042493,
36.051480, 37.059340, 23.985041699, 19.03547, 20.018862545, 21.01171291,
21.999573843, 22.994123669, 23.985041699, 24.985836917, 25.982592929,
26.984340585, 27.983876825, 28.9886, 29.990434, 30.996546, 31.998975,
33.005254, 34.009456424, 35.017340, 36.023000, 37.031400, 38.037570,
39.046772, 40.053930, 26.981538627, 21.028040, 22.019520, 23.007267432,
23.999938865, 24.990428095, 25.986891692, 26.981538627, 27.981910306,
28.980445046, 29.982960256, 30.983946619, 31.988124489, 32.990843336,
33.996851837, 34.999860235, 36.006207204, 37.01067782, 38.017231021,
39.02297, 40.031450, 41.038330, 42.046890, 27.97692653246, 22.034530,
23.025520, 24.011545616, 25.004105574, 25.992329921, 26.986704905,
27.97692653246, 28.9764947, 29.973770171, 30.975363226999998,
31.974148082, 32.97800022, 33.978575524, 34.984583575, 35.986599477,
36.99293608, 37.995633601, 39.002070013, 40.005869121, 41.01456,
42.019790, 43.028660, 44.035260, 30.973761629, 24.034350, 25.020260,
26.011780, 26.999230236, 27.992314761, 28.981800606, 29.978313789,
30.973761629, 31.973907274, 32.971725543, 33.973636257, 34.973314117,
35.97825968, 36.979608946, 37.984156827, 38.986179475, 39.991296951,
40.994335435, 42.001007913, 43.00619, 44.012990, 45.019220, 46.027380,
31.972070999, 26.027880, 27.018833, 28.004372763, 28.996608049,
29.984903249, 30.979554728, 31.972070999, 32.971458759, 33.967866902,
34.969032161, 35.96708076, 36.971125567, 37.971163317, 38.975134306,
39.975451728, 40.979582149, 41.981022419, 42.98715479, 43.99021339,
44.996508112, 46.000750, 47.008590, 48.014170, 49.023619, 34.968852682,
28.028510, 29.014110, 30.004770, 30.992413086, 31.985689901, 32.977451887,
33.973762819, 34.968852682, 35.968306981, 36.965902591, 37.968010425,
38.968008164, 39.970415472, 40.970684525, 41.973254804, 42.974054403,
43.978281071, 44.980286886, 45.98421004, 46.988710, 47.994950, 49.000320,
50.007840, 51.014490, 39.96238312251, 30.021560, 31.012123, 31.997637984,
32.989925709, 33.980271244, 34.975257585, 35.967545105, 36.96677632,
37.962732394, 38.964313231, 39.96238312251, 40.964500611, 41.963045736,
42.965636056, 43.964924033, 44.968039956, 45.968094129, 46.972186792,
47.974540, 48.980520, 49.984430, 50.991630, 51.996780, 53.004940,
38.963706679, 32.021920, 33.007260, 33.998410, 34.988009692, 35.981292235,
36.973375889, 37.969081184, 38.963706679, 39.963998475, 40.961825762,
41.96240281, 42.96071554, 43.961556804, 44.960699493, 45.961976864,
46.961678473, 47.965513535, 48.967450928, 49.972783355, 50.976380,
51.982610, 52.987120, 53.994200, 54.999710, 39.962590983, 34.014120,
35.004940, 35.993087063, 36.985870269, 37.976318452, 38.970719725,
39.962590983, 40.962278062, 41.958618014, 42.958766628, 43.955481754,
44.956186566, 45.953692587, 46.954546006, 47.952534177, 48.955674148,
49.957518962, 50.961499214, 51.9651, 52.970050, 53.974350, 54.980550,
55.985570, 56.992356, 44.955911909, 36.014920, 37.003050, 37.994700,
38.984790002, 39.977967407, 40.969251125, 41.965516429, 42.961150658,
43.959402752, 44.955911909, 45.95517189, 46.952407508, 47.952231468,
48.950023975, 49.952187685, 50.953603368, 51.956675468, 52.959610,
53.963264561, 54.968243949, 55.972870, 56.977790, 57.983710, 58.989220,
59.995710, 47.947946281, 38.009770, 39.001610, 39.990498838, 40.983145,
41.973030902, 42.968522499, 43.959690069, 44.958125616, 45.952631555,
46.951763088, 47.947946281, 48.947869982, 49.944791194, 50.946614955,
51.946897311, 52.949727171, 53.951052401, 54.955265056, 55.958199639,
56.963989137, 57.966970, 58.972930, 59.976760, 60.983200, 61.987490,
62.994420, 50.943959507, 40.011090, 40.999780, 41.991230, 42.980650,
43.97411, 44.965775808, 45.960200481, 46.95490894, 47.952253707,
48.948516101, 49.947158485, 50.943959507, 51.944775479, 52.944337979,
53.946439854, 54.947233701, 55.950530966, 56.952561432, 57.956834136,
58.960207407, 59.965026862, 60.968480, 61.973780, 62.977550, 63.983470,
64.987920, 51.940507472, 42.006430, 42.997710, 43.985549, 44.97964,
45.968358635, 46.962900046, 47.954031716, 48.951335721, 49.946044205,
50.944767431, 51.940507472, 52.940649386, 53.938880395, 54.940839672,
55.940653139, 56.943613013, 57.944353129, 58.948586367, 59.950076033,
60.954717204, 61.95661319, 62.961860, 63.964410, 64.970160, 65.973380,
66.979550, 54.938045141, 44.006870, 44.994510, 45.986720, 46.976100,
47.96852, 48.959618005, 49.95423823, 50.948210787, 51.945565464,
52.941290117, 53.940358854, 54.938045141, 55.93890491, 56.938285378,
57.939981549, 58.940440237, 59.942911246, 60.944652638, 61.94842822,
62.95023999, 63.95424909, 64.956336065, 65.961080, 66.964140, 67.969300,
68.972840, 55.934937475, 45.014578, 46.000810, 46.992890, 47.980504,
48.973610, 49.962988982, 50.956819538, 51.948113875, 52.945307942,
53.939610501, 54.938293357, 55.934937475, 56.935393969, 57.933275558,
58.934875464, 59.934071683, 60.936745281, 61.936767442, 62.940369091,
63.941201265, 64.94538027, 65.946780638, 66.950947244, 67.9537, 68.958780,
69.961460, 70.966720, 71.969620, 58.933195048, 47.011490, 48.001760,
48.989720, 49.981540, 50.970720, 51.963590, 52.954218896, 53.948459635,
54.941999029, 55.939839278, 56.936291373, 57.935752814, 58.933195048,
59.933817059, 60.932475763, 61.934050563, 62.933611611, 63.935809908,
64.93647846, 65.939762004, 66.940889529, 67.944873058, 68.94632, 69.951,
70.9529, 71.957810, 72.960240, 73.965380, 74.968330, 57.935342907,
48.019750, 49.009660, 49.995930, 50.987720, 51.975680, 52.968470,
53.957905495, 54.951330251, 55.942132022, 56.939793526, 57.935342907,
58.934346705, 59.930786372, 60.931056033, 61.928345115, 62.929669374,
63.927965959, 64.930084304, 65.929139334, 66.931569414, 67.931868789,
68.935610269, 69.9365, 70.940736283, 71.942092682, 72.946470, 73.948070,
74.952870, 75.955330, 76.960550, 77.963180, 62.929597474, 51.997180,
52.985550, 53.976710, 54.966050, 55.958560, 56.949211078, 57.944538499,
58.939498028, 59.93736503, 60.933457821, 61.932583745, 62.929597474,
63.929764183, 64.927789485, 65.928868813, 66.927730314, 67.929610889,
68.929429269, 69.932392343, 70.932676833, 71.935820307, 72.936675282,
73.939874862, 74.9419, 75.945275026, 76.947850, 77.951960, 78.954560,
79.960870, 63.929142222, 53.992950, 54.983980, 55.972380, 56.964788,
57.954591555, 58.949263764, 59.941827035, 60.939510635, 61.934329764,
62.933211566, 63.929142222, 64.929240984, 65.926033419, 66.927127345,
67.924844154, 68.926550281, 69.925319274, 70.927721599, 71.926857951,
72.929779104, 73.929458609, 74.932936741, 75.93329357, 76.936958967,
77.938440216, 78.942652, 79.944342348, 80.950480, 81.954420, 82.961030,
68.925573587, 55.994910, 56.982930, 57.974250, 58.963370, 59.957060,
60.949446287, 61.944175238, 62.939294196, 63.936838747, 64.932734754,
65.93158901, 66.928201703, 67.927980084, 68.925573587, 69.926021972,
70.924701349, 71.926366268, 72.925174682, 73.926945762, 74.926500246,
75.928827626, 76.9291543, 77.93160818, 78.93289326, 79.936515781,
80.937752355, 81.942990, 82.946980, 83.952650, 84.957000, 85.963120,
73.921177767, 57.991010, 58.981750, 59.970190, 60.963790, 61.954650,
62.949640, 63.941653, 64.939436406, 65.933843453, 66.93273407,
67.92809424, 68.927964533, 69.924247381, 70.924950954, 71.922075815,
72.923458945, 73.921177767, 74.922858948, 75.921402557, 76.923548591,
77.922852739, 78.925400995, 79.925372392, 80.928820467, 81.929549725,
82.934620, 83.937470, 84.943030, 85.946490, 86.952510, 87.956910,
88.963830, 74.921596478, 59.993130, 60.980620, 61.973200, 62.963690,
63.957572, 64.949564, 65.94471, 66.939186071, 67.936769069, 68.932273675,
69.930924826, 70.927112428, 71.926752283, 72.923824844, 73.923928692,
74.921596478, 75.922394021, 76.920647286, 77.921827281, 78.920947934,
79.922533816, 80.922132287, 81.924504067, 82.924980024, 83.929058,
84.932020, 85.936500, 86.939900, 87.944940, 88.949390, 89.955500,
90.960430, 91.966800, 79.916521271, 64.964660, 65.955210, 66.950090,
67.941798, 68.939557817, 69.933390644, 70.932241822, 71.927112352,
72.926765345, 73.922476436, 74.922523368, 75.919213597, 76.919914038,
77.91730909, 78.918499098, 79.916521271, 80.917992474, 81.916699401,
82.919118473, 83.918462354, 84.922245053, 85.924271579, 86.928521358,
87.931423998, 88.936450, 89.939960, 90.945960, 91.949920, 92.956290,
93.960490, 78.918337087, 66.964790, 67.958516, 68.950106, 69.944792,
70.93874, 71.936644572, 72.931691524, 73.929891034, 74.925776207,
75.924541469, 76.921379082, 77.921145706, 78.918337087, 79.918529296,
80.916290563, 81.916804119, 82.915180421, 83.916478974, 84.915608403,
85.918797577, 86.920711324, 87.924065926, 88.926385334, 89.930627737,
90.933968095, 91.939258714, 92.943050, 93.948680, 94.952870, 95.958530,
96.962800, 85.910610729, 68.965180, 69.955259, 70.949625738, 71.942092038,
72.939289195, 73.933084369, 74.930945746, 75.925910078, 76.92467,
77.920364783, 78.920082431, 79.916378965, 80.916592015, 81.9134836,
82.914136099, 83.911506687, 84.912527331, 85.910610729, 86.913354862,
87.914446969, 88.917630581, 89.919516555, 90.923445215, 91.92615621,
92.931274357, 93.934360, 94.939840, 95.943070, 96.948560, 97.951910,
98.957600, 99.961140, 84.911789737, 70.965320, 71.959080, 72.950561,
73.944264751, 74.93857, 75.935072226, 76.930408, 77.928141, 78.92398946,
79.92251925, 80.918995913, 81.918208598, 82.915109701, 83.914384821,
84.911789737, 85.911167419, 86.909180526, 87.911315588, 88.912278016,
89.914801694, 90.916536958, 91.9197289, 92.922041876, 93.926404946,
94.929302889, 95.934272637, 96.937351916, 97.941790668, 98.945379283,
99.949870, 100.953196445, 101.958870, 87.905612124, 72.965970,
73.956310, 74.949949568, 75.941766782, 76.937944782, 77.93218,
78.929708, 79.924521013, 80.923211846, 81.918401639, 82.917556701,
83.913425275, 84.912932803, 85.909260204, 86.908877124, 87.905612124,
88.907450675, 89.907737888, 90.910203095, 91.911037858, 92.914025634,
93.915361312, 94.919358766, 95.921696802, 96.926152923, 97.928452934,
98.933240926, 99.935351911, 100.940517888, 101.943018987, 102.948950,
103.952330, 104.958580, 88.905848295, 75.958450, 76.949645, 77.943610,
78.937351634, 79.93428, 80.929127468, 81.926792451, 82.922354243,
83.920388264, 84.916433039, 85.914885576, 86.91087573, 87.909501146,
88.905848295, 89.907151886, 90.907304791, 91.908949143, 92.909582713,
93.911595245, 94.912820621, 95.915891343, 96.918133995, 97.92220302,
98.924636204, 99.927756586, 100.93031385, 101.933555695, 102.936730,
103.941050, 104.944870, 105.949790, 106.954140, 107.959480,
89.904704416, 77.955230, 78.949160, 79.9404, 80.937210026, 81.931087,
82.928653801, 83.923250, 84.921471182, 85.916473591, 86.914816252,
87.910226904, 88.9088895, 89.904704416, 90.905645767, 91.905040847,
92.906476006, 93.906315192, 94.9080426, 95.908273386, 96.910953109,
97.912734892, 98.916512106, 99.917761889, 100.921140415, 101.922981285,
102.926599606, 103.928780, 104.933050, 105.935910, 106.940750,
107.943960, 108.949240, 109.952870, 92.906378058, 80.949030,
81.943130, 82.936705382, 83.933570, 84.927912447, 85.925038326,
86.920361108, 87.918332163, 88.913418245, 89.911264845,
90.906996243, 91.907193888, 92.906378058, 93.907283888, 94.906835792,
95.908100647, 96.908098556, 97.910328412, 98.911618375, 99.914181619,
100.915252025, 101.918037614, 102.919143842, 103.922464701,
104.923936545, 105.927970, 106.930310, 107.934840, 108.937630,
109.942440, 110.945650, 111.950830, 112.954700, 97.905408169, 82.948740,
83.940090, 84.936550, 85.930695904, 86.927326502, 87.921953241,
88.919480009, 89.913936896, 90.911750194, 91.906810991, 92.90681261,
93.905088269, 94.905842129, 95.904679477, 96.906021465, 97.905408169,
98.90771187, 99.907477336, 100.910347001, 101.91029736, 102.913207142,
103.913763625, 104.91697461, 105.918136802, 106.921692604, 107.923453,
108.927810, 109.929730, 110.934410, 111.936840, 112.941880, 113.944920,
114.950290, 98.906254747, 84.948830, 85.942880, 86.936530, 87.932678,
88.927167, 89.923556564, 90.918427639, 91.915260166, 92.910248984,
93.909657002, 94.907657084, 95.907871383, 96.906365358, 97.907215966,
98.906254747, 99.90765778, 100.907314659, 101.909215019, 102.909181351,
103.911447454, 104.911660566, 105.914357927, 106.915079572, 107.918461226,
108.919982665, 109.923820483, 110.92569283, 111.929146493, 112.931590,
113.935880, 114.938690, 115.943370, 116.946480, 117.951480, 101.904349312,
86.949180, 87.940260, 88.936110, 89.929890, 90.926292, 91.920120,
92.917052034, 93.911359711, 94.910412929, 95.907597835, 96.9075547,
97.905287132, 98.905939302, 99.904219476, 100.905582087, 101.904349312,
102.906323847, 103.905432701, 104.907752866, 105.907329433,
106.909905089, 107.910173465, 108.913203233, 109.914136041, 110.917696,
111.918965, 112.922487194, 113.924281, 114.928686173, 115.930810,
116.935580, 117.937820, 118.942840, 119.945310, 102.905504292,
88.948837, 89.942870, 90.936550, 91.931980, 92.925740, 93.921698,
94.91589874, 95.914460631, 96.911336797, 97.910708158, 98.908132104,
99.90812155, 100.906163625, 101.906843196, 102.905504292, 103.906655518,
104.905693821, 105.907287135, 106.906748423, 107.908728018, 108.908737289,
109.911136411, 110.911585913, 111.914394159, 112.915530627, 113.918806,
114.920334, 115.924062, 116.925980, 117.930070, 118.932110, 119.936410,
120.938720, 121.943210, 105.903485715, 90.949110, 91.940420, 92.935910,
93.928770, 94.924690, 95.918164359, 96.916479073, 97.912720902,
98.911767833, 99.908505886, 100.908289242, 101.905608544, 102.906087307,
103.904035834, 104.90508492, 105.903485715, 106.905133481, 107.903891701,
108.905950451, 109.905153254, 110.907670734, 111.907314058, 112.910152908,
113.910362638, 114.913683824, 115.914158662, 116.917841338, 117.9189843,
118.923110, 119.924691878, 120.928870, 121.930550, 122.934930, 123.936880,
106.90509682, 92.949780, 93.942780, 94.935480, 95.930680, 96.923972412,
97.921566201, 98.917597178, 99.916104255, 100.912802233, 101.911685,
102.90897272, 103.908629157, 104.906528661, 105.906668921, 106.90509682,
107.905955556, 108.904752292, 109.906107231, 110.905291157, 111.907004814,
112.906566579, 113.908803704, 114.908762698, 115.911359933, 116.911684562,
117.914582768, 118.915665059, 119.918787384, 120.919848046, 121.923530,
122.924900, 123.928640, 124.930430, 125.934500, 126.936770, 127.941170,
128.943690, 129.950448, 113.90335854, 94.949870, 95.939770, 96.934940,
97.927395546, 98.925010, 99.920289525, 100.918681538, 101.914462258,
102.913419246, 103.909849475, 104.909467905, 105.90645941, 106.906617928,
107.904183683, 108.904982293, 109.90300207, 110.904178107, 111.902757809,
112.904401662, 113.90335854, 114.905430969, 115.904755809, 116.907218618,
117.90691453, 118.909921597, 119.909850129, 120.912977363, 121.913332432,
122.917002999, 123.917647616, 124.92124637, 125.922353321, 126.926443864,
127.927762285, 128.932150, 129.933901937, 130.940670, 131.945550,
114.903878484, 96.949540, 97.942140, 98.934220, 99.931110851,
100.926340, 101.924090238, 102.919914188, 103.918296171, 104.91467354,
105.913465411, 106.9102951, 107.90969818, 108.907150507, 109.907165274,
110.905103278, 111.905532331, 112.904057761, 113.904913876,
114.903878484, 115.905259703, 116.904513564, 117.906354367, 118.90584535,
119.907959608, 120.907845822, 121.91027601, 122.910438276, 123.913175231,
124.913600588, 125.916463857, 126.917353091, 127.920172328, 128.92169698,
129.924970049, 130.926851767, 131.93299026, 132.937810, 133.944150,
134.949330, 119.902194676, 98.949330, 99.939044343, 100.936060,
101.930295324, 102.928100, 103.923143223, 104.921349437, 105.91688062,
106.915644329, 107.911925378, 108.911283214, 109.907842791, 110.90773446,
111.904818207, 112.905170577, 113.902778869, 114.903342397, 115.90174053,
116.902951656, 117.901603167, 118.90330763, 119.902194676, 120.90423548,
121.903439046, 122.905720838, 123.905273946, 124.907784125, 125.90765328,
126.910360024, 127.910536624, 128.913479, 129.913967295, 130.916999769,
131.917815713, 132.923829249, 133.928291765, 134.934730, 135.939340,
136.945990, 120.903815686, 102.939690, 103.936472, 104.931486348,
105.928791, 106.924150, 107.922160, 108.918132426, 109.916753, 110.913163,
111.912398009, 112.909371672, 113.909269, 114.906598, 115.906793629,
116.904835941, 117.905528731, 118.903942009, 119.905072427, 120.903815686,
121.905173651, 122.90421397, 123.905935743, 124.905253818, 125.90724748,
126.906923609, 127.909169001, 128.909148442, 129.911656324, 130.911982275,
131.914466896, 132.91525163, 133.920379744, 134.925165771, 135.930350,
136.935310, 137.940790, 138.945980, 129.906224399, 104.943640,
105.937504237, 106.935006, 107.929444597, 108.927415515, 109.922407316,
110.921110692, 111.917013672, 112.915891, 113.912089, 114.911902,
115.90846, 116.908644719, 117.905827581, 118.906403645, 119.904020222,
120.904936424, 121.903043898, 122.904270029, 123.902817896, 124.904430731,
125.903311696, 126.905226336, 127.904463056, 128.906598238, 129.906224399,
130.908523864, 131.90855316, 132.910955306, 133.911368737, 134.916448592,
135.920101246, 136.925322954, 137.929220, 138.934730, 139.938850,
140.944650, 141.949080, 126.904472681, 107.943475, 108.938149417,
109.935242, 110.930276, 111.927970, 112.923640583, 113.921850, 114.918048,
115.916808633, 116.91365, 117.913074, 118.910074, 119.910048173,
120.907366811, 121.907589284, 122.905588965, 123.906209852, 124.904630164,
125.905624153, 126.904472681, 127.905809443, 128.904987722, 129.906674247,
130.906124609, 131.907997381, 132.907796939, 133.909744465, 134.910048121,
135.914653993, 136.91787084, 137.922349591, 138.926099478, 139.931000,
140.935030, 141.940180, 142.944560, 143.949990, 131.904153457, 109.944278068,
110.941602, 111.935623112, 112.933341174, 113.927980306, 114.92629392,
115.921581087, 116.920358735, 117.916178655, 118.915410688, 119.911784244,
120.911461829, 121.908367632, 122.90848191, 123.905893003, 124.906395464,
125.904273634, 126.905183723, 127.903531275, 128.904779435, 129.903508007,
130.905082362, 131.904153457, 132.905910722, 133.905394464, 134.907227495,
135.907218794, 136.911562125, 137.913954475, 138.918792936, 139.921640943,
140.926648049, 141.92970959, 142.935110, 143.938510, 144.944070, 145.947750,
146.953560, 132.905451932, 111.950301, 112.944493274, 113.941450, 114.935910,
115.933367, 116.928670701, 117.926559494, 118.922377304, 119.920677253,
120.917229209, 121.916113434, 122.912996036, 123.912257798, 124.90972827,
125.909451977, 126.907417525, 127.907748866, 128.906064426, 129.906708552,
130.905463926, 131.90643426, 132.905451932, 133.906718475, 134.905977008,
135.907311576, 136.907089473, 137.911016704, 138.913363999, 139.917282354,
140.920045752, 141.924298927, 142.92735175, 143.932076914, 144.93552617,
145.940289423, 146.944155008, 147.949218153, 148.952930, 149.958170,
150.962190, 137.905247237, 113.950675405, 114.947370, 115.941380,
116.938499, 117.933040, 118.930659661, 119.926044974, 120.924054499,
121.919904, 122.918781036, 123.915093603, 124.914472912, 125.911250177,
126.911093797, 127.908317698, 128.908679439, 129.906320811, 130.906941118,
131.905061288, 132.90600749, 133.904508383, 134.905688591, 135.904575945,
136.905827384, 137.905247237, 138.908841341, 139.910604505, 140.914411009,
141.91645341, 142.920626719, 143.922952853, 144.927627032, 145.930219572,
146.934945, 147.937720047, 148.942580, 149.945680, 150.950810, 151.954270,
152.959610, 138.906353267, 116.950068, 117.946730, 118.940990, 119.938070,
120.933010, 121.930710, 122.926240, 123.924574275, 124.920816034,
125.919512667, 126.916375448, 127.915585177, 128.912692815, 129.912368724,
130.91007, 131.910101145, 132.908218, 133.908514011, 134.906976844,
135.907635536, 136.906493598, 137.90711193, 138.906353267, 139.909477645,
140.910962152, 141.91407913, 142.91606272, 143.919599647, 144.921645401,
145.92579346, 146.928235284, 147.932228868, 148.934734, 149.938770,
150.941720, 151.946250, 152.949620, 153.954500, 154.958350, 139.905438706,
118.952760, 119.946640, 120.943420, 121.937910, 122.935400, 123.930410,
124.928440, 125.923971, 126.922731, 127.918911, 128.918102, 129.914736,
130.914422, 131.911460487, 132.91151502, 133.908924821, 134.909151396,
135.907172422, 136.907805577, 137.905991321, 138.906652651, 139.905438706,
140.90827627, 141.909244205, 142.91238591, 143.913647336, 144.917233135,
145.918759009, 146.922673954, 147.92443241, 148.928399883, 149.930408931,
150.933976196, 151.936540, 152.940580, 153.943420, 154.948040, 155.951260,
156.956340, 140.907652769, 120.955364, 121.951810, 122.945960, 123.942960,
124.937830, 125.935310, 126.930830, 127.928791, 128.925095, 129.92359,
130.920259, 131.919255, 132.916330532, 133.915711737, 134.913111745,
135.912691611, 136.910705455, 137.910754636, 138.908938399, 139.909075874,
140.907652769, 141.910044806, 142.910816926, 143.913305245, 144.9145117,
145.917644336, 146.918995992, 147.922135026, 148.923717651, 149.926672997,
150.928318618, 151.931499225, 152.933838905, 153.937518153, 154.940120,
155.944270, 156.947430, 157.951980, 158.955500, 141.907723297, 123.952230,
124.948880, 125.943220, 126.940500, 127.935390, 128.933188, 129.928506,
130.927247, 131.923321237, 132.922348, 133.918790181, 134.91818116,
135.914976035, 136.914567137, 137.911949961, 138.911978288, 139.909552,
140.909609854, 141.907723297, 142.90981429, 143.910087274, 144.912573636,
145.913116939, 146.916100441, 147.916893288, 148.920148842, 149.920890888,
150.923828929, 151.924682219, 152.927698232, 153.929477307, 154.932932,
155.935018114, 156.939030, 157.941600, 158.946090, 159.949090, 160.953880,
144.912749023, 125.957520, 126.951630, 127.948420, 128.943160, 129.940450,
130.935870, 131.933750, 132.929782, 133.928353, 134.924876, 135.923565829,
136.920479493, 137.919548281, 138.916804082, 139.916041789, 140.913555054,
141.912874471, 142.910932616, 143.912590843, 144.912749023, 145.914696305,
146.915138545, 147.917474618, 148.918334155, 149.920983561, 150.921206973,
151.923496795, 152.924116889, 153.926463943, 154.928101267, 155.931056736,
156.933039369, 157.936561407, 158.938970, 159.942990, 160.945860,
161.950290, 162.953680, 151.919732425, 127.958080, 128.954640, 129.948920,
130.946110, 131.940690, 132.938670, 133.933970, 134.93252, 135.928275527,
136.926971746, 137.923243961, 138.922296605, 139.918994687, 140.918476488,
141.915197641, 142.914628338, 143.911999478, 144.913410353, 145.9130409,
146.914897923, 147.914822674, 148.917184735, 149.917275539, 150.919932409,
151.919732425, 152.922097356, 153.922209273, 154.924640161, 155.925527887,
156.928358717, 157.929991317, 158.933211271, 159.935140, 160.938830,
161.941220, 162.945360, 163.948280, 164.952980, 152.921230339, 129.963569,
130.957753, 131.954370, 132.949240, 133.946510, 134.941820, 135.939600,
136.935570, 137.933709, 138.92979228, 139.928087607, 140.92493072,
141.923434945, 142.920297509, 143.918816823, 144.916265237, 145.917205817,
146.916746111, 147.918085895, 148.917931238, 149.919701819, 150.919850161,
151.921744534, 152.921230339, 153.922979237, 154.92289326, 155.924752249,
156.925423647, 157.927845302, 158.929088861, 159.931971, 160.933680,
161.937040, 162.939210, 163.942990, 164.945720, 165.949970, 166.953210,
157.924103912, 133.955370, 134.952570, 135.947340, 136.945020, 137.940120,
138.938240, 139.933674, 140.932126, 141.928116, 142.92674951, 143.922963,
144.921709252, 145.918310608, 146.91909442, 147.918114524, 148.919340915,
149.918658876, 150.920348482, 151.919790996, 152.921749543, 153.920865598,
154.922622022, 155.922122743, 156.923960135, 157.924103912, 158.926388658,
159.927054146, 160.929669211, 161.930984751, 162.933990, 163.935860,
164.939380, 165.941600, 166.945570, 167.948360, 168.952870, 158.925346757,
135.961380, 136.955980, 137.953160, 138.948290, 139.945805049, 140.941448,
141.938744, 142.935121, 143.933045, 144.929274, 145.927246584, 146.924044585,
147.924271701, 148.923245909, 149.923659686, 150.923102543, 151.924074438,
152.923434588, 153.924678019, 154.923505236, 155.924747213, 156.924024604,
157.925413137, 158.925346757, 159.927167606, 160.927569919, 161.929488234,
162.930647536, 163.933350838, 164.934880, 165.937991959, 166.940050,
167.943640, 168.946220, 169.950250, 170.953300, 163.929174751, 137.962490,
138.959540, 139.954010, 140.951350, 141.946366, 142.943830, 143.939254,
144.937425, 145.932845369, 146.9310915, 147.927149831, 148.927304787,
149.925585184, 150.926184601, 151.9247183, 152.92576467, 153.924424457,
154.925753775, 155.92428311, 156.925466095, 157.924409487, 158.925739214,
159.925197517, 160.926933364, 161.926798447, 162.928731159, 163.929174751,
164.931703333, 165.932806741, 166.935655462, 167.937128769, 168.940307614,
169.942390, 170.946200, 171.948760, 172.953000, 164.93032207, 139.968539,
140.963098, 141.959770, 142.954610, 143.951480, 144.947200, 145.944640,
146.940056, 147.937718, 148.933774771, 149.933496182, 150.931688142,
151.931713714, 152.930198789, 153.930601579, 154.929103491, 155.929839,
156.928256188, 157.928941007, 158.927711959, 159.928729478, 160.927854776,
161.929095504, 162.928733903, 163.930233507, 164.93032207, 165.932284162,
166.933132633, 167.935515708, 168.936872273, 169.939618929, 170.94146515,
171.944820, 172.947290, 173.951150, 174.954050, 165.930293061, 142.966340,
143.960380, 144.957390, 145.952000, 146.949490, 147.944550, 148.942306,
149.937913839, 150.937448903, 151.935050389, 152.935063492, 153.932783081,
154.933208949, 155.931064698, 156.931916, 157.929893474, 158.930684066,
159.929083292, 160.929995309, 161.928778264, 162.930032749, 163.929200229,
164.930726003, 165.930293061, 166.932048159, 167.932370224, 168.934590364,
169.935464312, 170.938029808, 171.939356113, 172.942400, 173.944230,
174.947770, 175.950080, 176.954050, 168.93421325, 144.970073, 145.966425,
146.960961, 147.957840, 148.952720, 149.949960, 150.94548349, 151.944422,
152.942012112, 153.941567808, 154.939199459, 155.938979933, 156.936973,
157.936979525, 158.934975, 159.935262801, 160.933549, 161.933994682,
162.932651124, 163.93356, 164.932435492, 165.933554131, 166.932851622,
167.934172776, 168.93421325, 169.935801397, 170.93642944, 171.938400044,
172.939603607, 173.942168605, 174.943836853, 175.946994685, 176.949040,
177.952640, 178.955340, 173.938862089, 147.967420, 148.964040, 149.958420,
150.955400769, 151.950288919, 152.949480, 153.946393928, 154.945782332,
155.942818215, 156.942627848, 157.939865617, 158.940050099, 159.937552344,
160.937901678, 161.93576821, 162.936334305, 163.934489416, 164.935279,
165.933882042, 166.934949605, 167.933896895, 168.935189802, 169.934761837,
170.936325799, 171.936381469, 172.938210787, 173.938862089, 174.94127645,
175.942571683, 176.945260822, 177.94664668, 178.950170, 179.952330,
180.956150, 174.940771819, 149.973228, 150.967577, 151.964120,
152.958767331, 153.957522, 154.954316216, 155.953032523, 156.9500983,
157.949313283, 158.946628776, 159.946033, 160.943572, 161.943277288,
162.941179, 163.941339, 164.939406724, 165.939859, 166.93827,
167.938739111, 168.937651439, 169.938474968, 170.937913136, 171.939085669,
172.938930602, 173.94033748, 174.940771819, 175.94268631, 176.943758055,
177.945954559, 178.947327443, 179.94988116, 180.951970, 181.955040,
182.957570, 183.960910, 179.946549953, 152.970690, 153.964860, 154.963390,
155.959364025, 156.958396, 157.954799366, 158.95399487, 159.950684379,
160.950274844, 161.947210498, 162.947089, 163.944367284, 164.944567,
165.94218, 166.9426, 167.940568, 168.941259, 169.939609, 170.940492,
171.939448301, 172.940513, 173.940046178, 174.941509181, 175.941408631,
176.943220651, 177.943698766, 178.945816145, 179.946549953, 180.949101246,
181.950554096, 182.953530439, 183.955446515, 184.958820, 185.960890,
186.964590, 187.966850, 180.947995763, 154.974592, 155.972303,
156.968192445, 157.966699, 158.963018173, 159.961486056, 160.958417,
161.957291859, 162.954330271, 163.953534, 164.950772514, 165.950512,
166.948093, 167.948047, 168.946011, 169.946175, 170.944476, 171.944895,
172.94375, 173.944454, 174.943737, 175.944857, 176.944472403,
177.945778221, 178.945929535, 179.947464831, 180.947995763, 181.950151849,
182.951372616, 183.954007966, 184.955559375, 185.958552023, 186.960530,
187.963700, 188.965830, 189.969230, 183.950931188, 157.974562, 158.972918,
159.968478805, 160.967357, 161.963497417, 162.962523542, 163.958954382,
164.958279949, 165.955027253, 166.954816014, 167.951808394, 168.95177879,
169.949228482, 170.949451, 171.947292, 172.947689, 173.946079, 174.946717,
175.945634, 176.946643, 177.945876236, 178.947070447, 179.946704459,
180.948197248, 181.948204156, 182.950222951, 183.950931188, 184.953419264,
185.954364127, 186.957160466, 187.958489105, 188.961912868, 189.963181378,
190.966600, 191.968170, 186.955753109, 159.982115, 160.977589119,
161.976002, 162.972080535, 163.970323, 164.967088557, 165.965808,
166.962601, 167.961572608, 168.958791096, 169.958220071, 170.955716,
171.955422961, 172.953243, 173.953115, 174.951381, 175.951623, 176.950328,
177.950989, 178.949987641, 179.950789084, 180.950067916, 181.95121008,
182.950819841, 183.952520756, 184.952954982, 185.954986084, 186.955753109,
187.958114438, 188.959229007, 189.961817977, 190.963125242, 191.965960,
192.967470, 193.970420, 191.96148069, 161.984431, 162.982690,
163.978035649, 164.976762, 165.972690753, 166.971547969, 167.967803678,
168.96701927, 169.963577028, 170.963184819, 171.960023303, 172.959808409,
173.957062202, 174.956945835, 175.954806, 176.954965324, 177.953251241,
178.953816017, 179.952378803, 180.953244, 181.952110186, 182.953126102,
183.952489071, 184.954042265, 185.953838158, 186.955750458, 187.955838228,
188.95814747, 189.958447048, 190.960929718, 191.96148069, 192.964151563,
193.965182083, 194.968126661, 195.969639333, 192.96292643, 163.992201,
164.987520, 165.985824, 166.981665156, 167.979881, 168.976294942, 169.974965,
170.971626042, 171.970456, 172.967501739, 173.966861045, 174.964112895,
175.963648688, 176.9613015, 177.961082, 178.959122266, 179.959229446,
180.957625297, 181.958076296, 182.956846458, 183.957476, 184.956698,
185.957946104, 186.957363361, 187.958853121, 188.958718935, 189.960545968,
190.960594046, 191.962605012, 192.96292643, 193.965078378, 194.965979573,
195.968396542, 196.969653285, 197.972280, 198.973804583, 194.964791134,
165.994855, 166.992979, 167.988150742, 168.986715, 169.982495289,
170.981244542, 171.977347128, 172.976444754, 173.972818767, 174.972420552,
175.968944622, 176.968469481, 177.965648724, 178.965363404, 179.963031477,
180.963097285, 181.961170656, 182.961596703, 183.959922251, 184.960619,
185.959350813, 186.960587, 187.959395391, 188.960833686, 189.959931655,
190.961676661, 191.961038005, 192.962987401, 193.962680253, 194.964791134,
195.964951521, 196.967340182, 197.96789279, 198.970593094, 199.971440677,
200.974512868, 201.975740, 196.966568662, 168.998080, 169.996122,
170.991878881, 171.990035, 172.98623738, 173.984761, 174.981274107,
175.980099, 176.976864908, 177.97603192, 178.973212812, 179.972521124,
180.970079048, 181.969617874, 182.967593034, 183.967451524, 184.965789411,
185.965952703, 186.964567541, 187.965323661, 188.963948286, 189.964700339,
190.963704225, 191.964812953, 192.964149715, 193.96536525, 194.96503464,
195.966569813, 196.966568662, 197.968242303, 198.968765193, 199.970725647,
200.97165724, 201.973805838, 202.975154542, 203.977724, 204.979870,
201.970643011, 171.003760, 171.998832686, 172.997242, 173.992863695,
174.99142327, 175.98735458, 176.986279158, 177.982483143, 178.981833861,
179.978266394, 180.977819311, 181.974689964, 182.974449841, 183.971713051,
184.971899086, 185.96936179, 186.969814236, 187.967577049, 188.968190034,
189.966322449, 190.967157105, 191.965634327, 192.966665421, 193.965439409,
194.966720113, 195.965832649, 196.967212908, 197.966769032, 198.968279932,
199.968326004, 200.970302268, 201.970643011, 202.972872484, 203.973493933,
204.976073386, 205.977514066, 206.982588545, 207.985940, 208.991040,
209.994510, 204.974427541, 176.000590, 176.996427286, 177.994897,
178.991089082, 179.989906, 180.986257447, 181.985667104, 182.982192802,
183.981873122, 184.978791305, 185.978325, 186.975905897, 187.976009782,
188.973588428, 189.973877149, 190.971786154, 191.972225, 192.970672,
193.9712, 194.969774335, 195.970481151, 196.969574511, 197.970483495,
198.969877, 199.970962672, 200.970818891, 201.972105808, 202.97234422,
203.973863522, 204.974427541, 205.97611032, 206.977419429, 207.9820187,
208.985358952, 209.990073689, 210.993477, 211.998228, 207.976652071,
178.003830191, 179.002150, 179.997918173, 180.996623958, 181.992671842,
182.991874629, 183.988142339, 184.987609944, 185.984238945, 186.98391837,
187.980874338, 188.980807, 189.978081517, 190.978265, 191.975785171,
192.976173234, 193.97401207, 194.97454205, 195.972774109, 196.973431124,
197.972033959, 198.97291665, 199.971826675, 200.972884511, 201.972159133,
202.973390521, 203.973043589, 204.974481755, 205.974465278, 206.975896887,
207.976652071, 208.98109012, 209.984188527, 210.988736964, 211.991897543,
212.996581499, 213.999805408, 215.004807, 208.980398734, 184.001124,
184.997625, 185.996597625, 186.993157835, 187.992265154, 188.989199012,
189.988295129, 190.985786119, 191.985457954, 192.982959771, 193.98283396,
194.980650737, 195.980666509, 196.978864454, 197.979206, 198.977671961,
199.978131829, 200.977009036, 201.977742324, 202.976876001, 203.977812736,
204.977389366, 205.97849913, 206.978470679, 207.979742196, 208.980398734,
209.984120371, 210.98726946, 211.991285724, 212.994384666, 213.998711539,
215.001769776, 216.006305943, 217.009470, 218.014316, 208.982430435,
187.999422048, 188.998480562, 189.995101185, 190.994574485, 191.991335149,
192.991025275, 193.988185606, 194.988110728, 195.98553458, 196.98565963,
197.983388616, 198.983666063, 199.981798604, 200.982259764, 201.980757541,
202.981420103, 203.980318121, 204.981203322, 205.980481099, 206.981593173,
207.981245702, 208.982430435, 209.982873673, 210.986653154, 211.988867969,
212.99285728, 213.99520135, 214.999419988, 216.001915035, 217.006334796,
218.008973037, 219.013744, 220.016602, 210.987496271, 192.999843112,
193.998725085, 194.996268098, 195.995788077, 196.993189215, 197.992837202,
198.990532254, 199.990351264, 200.988416999, 201.988630236, 202.986941984,
203.987251326, 204.986074483, 205.986667036, 206.985783502, 207.986589977,
208.986173143, 209.98714771, 210.987496271, 211.990744771, 212.992936646,
213.996371733, 214.99865257, 216.002423257, 217.004718822, 218.008694336,
219.011161691, 220.015407682, 221.018050, 222.022330, 223.025190,
222.017577738, 195.005437696, 196.002115223, 197.001584351, 197.998678663,
198.998370297, 199.9956993, 200.995628335, 201.993263492, 202.993386687,
203.99142874, 204.991718799, 205.990214104, 206.990734225, 207.98964247,
208.990414742, 209.989696216, 210.990600523, 211.990703529, 212.993882668,
213.995362554, 214.998745483, 216.00027437, 217.003927675, 218.005601256,
219.009480204, 220.011393981, 221.015536782, 222.017577738, 223.021790,
224.024090, 225.028440, 226.030890, 227.035407, 228.037986, 222.01755173,
199.007258147, 200.00657249, 201.003860867, 202.003372847, 203.000924647,
204.000653204, 204.99859396, 205.998666066, 206.996949414, 207.997138783,
208.995953555, 209.996407738, 210.995536544, 211.996202244, 212.996189081,
213.998971145, 215.000341497, 216.00319799, 217.004631951, 218.007578322,
219.009252149, 220.012327405, 221.014254762, 222.01755173, 223.019735857,
224.023249951, 225.025565414, 226.029386231, 227.031835938, 228.035729,
229.038450228, 230.042510, 231.045440, 232.049772, 228.031070292,
202.009890686, 203.009271619, 204.006499668, 205.00626857, 206.00382727,
207.003798105, 208.00183994, 209.001991373, 210.000494978, 211.000897987,
211.999794499, 213.000383959, 214.000107894, 215.002719834, 216.003533035,
217.006320327, 218.00714023, 219.010085078, 220.011028384, 221.013917338,
222.01537453, 223.018502171, 224.020211821, 225.023611564, 226.025409823,
227.029177842, 228.031070292, 229.034957577, 230.037056394, 231.041220,
232.043638, 233.048060, 234.050704, 227.027752127, 206.01450498,
207.011949748, 208.011551551, 209.009494863, 210.009435986, 211.007734835,
212.007813822, 213.006607643, 214.006901798, 215.006453625, 216.008720075,
217.009346914, 218.011641453, 219.012420389, 220.014762979, 221.015591248,
222.017843851, 223.019137468, 224.021722866, 225.023229585, 226.026098089,
227.027752127, 228.031021112, 229.033015243, 230.036294178, 231.038558786,
232.042027438, 233.044550, 234.048420, 235.051232, 236.055296,
232.038055325, 209.017715682, 210.015075342, 211.014928413, 212.012980288,
213.01301014, 214.01149977, 215.01173033, 216.011062115, 217.013114328,
218.013284499, 219.015536895, 220.015747762, 221.018183674, 222.018468121,
223.020811448, 224.021466895, 225.023951021, 226.024903069, 227.02770407,
228.028741127, 229.03176243, 230.033133843, 231.036304343, 232.038055325,
233.041581843, 234.04360123, 235.047510074, 236.049870, 237.053894,
238.056496, 231.03588399, 212.023204138, 213.02110934, 214.020918417,
215.019185865, 216.019109564, 217.018323986, 218.020041889, 219.019883143,
220.021875303, 221.021877983, 222.023742, 223.023962273, 224.025625738,
225.026130678, 226.027947753, 227.028805072, 228.031051376, 229.032096793,
230.034540754, 231.03588399, 232.038591592, 233.040247277, 234.043308058,
235.045443615, 236.048681284, 237.051145659, 238.05450271, 239.057260,
240.060980, 238.050788247, 217.024368791, 218.023535671, 219.02491916,
220.024723, 221.026399, 222.026086, 223.0277386, 224.027604778,
225.029390717, 226.029338702, 227.031156367, 228.031374006, 229.033505939,
230.033939784, 231.036293704, 232.037156152, 233.039635207, 234.040952088,
235.043929918, 236.045568006, 237.048730184, 238.050788247, 239.054293299,
240.056591988, 241.060330, 242.062931, 237.048173444, 225.033913933,
226.035145, 227.034956789, 228.036180, 229.036263808, 230.037827597,
231.038245085, 232.040108, 233.040740546, 234.042895038, 235.044063267,
236.0465696, 237.048173444, 238.050946405, 239.052939025, 240.056162182,
241.058252431, 242.06164118, 243.064279, 244.067850, 242.058742611,
228.038742328, 229.040150212, 230.039649886, 231.041101107, 232.041187097,
233.042997375, 234.043317076, 235.04528605, 236.046057964, 237.048409658,
238.049559894, 239.052163381, 240.053813545, 241.056851456, 242.058742611,
243.062003092, 244.064203907, 245.067747154, 246.070204627, 247.074070,
243.06138108, 231.045560, 232.046590, 233.046348, 234.047809, 235.047946,
236.049579, 237.049996, 238.051984324, 239.053024479, 240.055300179,
241.056829144, 242.059549159, 243.06138108, 244.064284847, 245.066452114,
246.069774619, 247.072093, 248.075752, 249.078480, 247.07035354,
233.050771232, 234.050159841, 235.051434, 236.051413, 237.052901,
238.053028697, 239.054957, 240.055529539, 241.057653001, 242.058835824,
243.061389114, 244.062752578, 245.065491249, 246.067223662, 247.07035354,
248.072348508, 249.075953413, 250.078356959, 251.082284605, 252.084870,
247.07030708, 235.056580, 236.057330, 237.057003, 238.058281, 239.058279,
240.059759, 241.060230, 242.061981, 243.063007572, 244.065180774,
245.066361616, 246.068672947, 247.07030708, 248.073086, 249.074986657,
250.07831652, 251.080760172, 252.084310, 253.086880, 254.090600,
251.079586788, 237.062070, 238.061410, 239.062422, 240.062302, 241.063726,
242.063701552, 243.065427, 244.066000689, 245.068048612, 246.068805309,
247.071000589, 248.072184861, 249.074853537, 250.076406066, 251.079586788,
252.081625846, 253.085133145, 254.087322909, 255.091046, 256.093440,
252.082978512, 240.068920, 241.068538, 242.069745, 243.069548, 244.070883,
245.071324, 246.072896, 247.073656, 248.075471, 249.076411, 250.078612,
251.079992142, 252.082978512, 253.084824697, 254.088022021, 255.090273122,
256.093598, 257.095979, 258.099520, 257.095104724, 242.073430, 243.074353,
244.074084, 245.075385, 246.075299023, 247.076847, 248.077194714,
249.079034, 250.079521264, 251.081575017, 252.082466855, 253.085185236,
254.08685422, 255.089962202, 256.091773117, 257.095104724, 258.097076,
259.100595, 260.102678, 258.098431319, 245.080829, 246.081886, 247.081635,
248.082823, 249.083013, 250.084420, 251.084839, 252.086560, 253.087280,
254.089656, 255.091082705, 256.094059025, 257.095541368, 258.098431319,
259.100509, 260.103652, 261.105721, 262.108865, 255.093241131, 248.086596,
249.087833, 250.087510, 251.089012, 252.088976521, 253.090678,
254.090955253, 255.093241131, 256.094282666, 257.09687719, 258.098207,
259.101031, 260.102643, 261.105749, 262.107301, 263.110552, 264.112345,
260.105504, 251.094360, 252.095371, 253.095210, 254.096454, 255.096681,
256.098629, 257.099555, 258.101814, 259.102901, 260.105504, 261.106883,
262.109634, 263.111293, 264.114038, 265.115839, 266.119305, 263.112547,
253.100689, 254.100184, 255.101340, 256.101166194, 257.102990,
258.103489, 259.105637, 260.106440, 261.108766556, 262.109925, 263.112547,
264.113985, 265.116704, 266.117956, 267.121529, 268.123644, 255.107398,
255.107398, 256.108127, 257.107722, 258.109231, 259.109610, 260.111300,
261.112056, 262.114084, 263.114988, 264.117404, 265.118601, 266.121029,
267.122377, 268.125445, 269.127460, 270.130712, 259.114500, 258.113168,
259.114500, 260.114422071, 261.116117, 262.116398, 263.118322, 264.118931,
265.121114693, 266.122065, 267.124425, 268.125606, 269.128755, 270.130329,
271.133472, 272.135158, 273.138220, 262.122892, 260.121970, 261.121664,
262.122892, 263.123035, 264.124604, 265.125147, 266.126942, 267.127650,
268.129755, 269.130694, 270.133616, 271.135179, 272.138032, 273.139618,
274.142440, 275.144250, 263.128558, 263.128558, 264.128394885, 265.130085,
266.130097, 267.131789, 268.132162, 269.134056, 270.134650, 271.137657,
272.139052, 273.141986, 274.143131, 275.145952, 276.147208, 277.149841,
265.136151, 265.136151, 266.137299, 267.137307, 268.138728, 269.139055,
270.140657, 271.141139, 272.143738, 273.144913, 274.147492, 275.148647,
276.151156, 277.152420, 278.154812, 279.156193, 281.162061, 267.144341,
268.143795, 269.145124, 270.144720, 271.146062, 272.146317, 273.148863,
274.149492, 275.152176, 276.153034, 277.155647, 278.156469, 279.158861,
280.159795, 281.162061, 272.153615, 272.153615, 273.153682, 274.155713,
275.156142, 276.158493, 277.159519, 278.161604, 279.162468, 280.164473,
281.165372, 282.167486, 283.168415, 283.171792, 277.163943, 278.164312,
279.166546, 280.167039, 281.169286, 282.169765, 283.171792, 284.172384,
285.174105, 283.176451, 283.176451, 284.178080, 285.178732, 286.180481,
287.181045, 285.183698, 285.183698, 286.183855, 287.185599, 288.185689,
289.187279, 287.191186, 287.191186, 288.192492, 289.192715, 290.194141,
291.194384, 292.199786, 289.198862, 290.198590, 291.200011, 292.199786,
291.206564, 291.206564, 292.207549, 293.214670, 293.214670]
# Lookup dicts assembled from the parallel _temp_* data lists defined above
# (element symbols, isotope labels, masses, atomic numbers Z, element names).
el2mass = dict(zip(_temp_symbol, _temp_mass))  # element symbol -> mass
el2mass["GH"] = 0. # note that ghost atoms in Cfour have mass 100.
eliso2mass = dict(zip(_temp_iso_symbol, _temp_iso_mass)) # isotope label -> mass; encompasses el2mass
eliso2mass["GH"] = 0. # note that ghost atoms in Cfour have mass 100.
#eliso2mass["X0"] = 0. # probably needed, just checking -- TODO(review): confirm before enabling
el2z = dict(zip(_temp_symbol, _temp_z))  # element symbol -> atomic number
el2z["GH"] = 0  # ghost atoms carry no nuclear charge
z2mass = dict(zip(_temp_z, _temp_mass))  # atomic number -> mass
z2el = dict(zip(_temp_z, _temp_symbol))  # atomic number -> element symbol
z2element = dict(zip(_temp_z, _temp_element))  # atomic number -> element name
| gpl-2.0 |
muffinresearch/amo-validator | validator/outputhandlers/shellcolors.py | 8 | 2788 | import re
try:
import curses
except ImportError:
curses = None
import os
import sys
from StringIO import StringIO
# Color names accepted in <<COLOR>> markup; each must correspond to a
# curses COLOR_<NAME> constant (looked up via getattr in OutputHandler).
COLORS = ('BLUE', 'RED', 'GREEN', 'YELLOW', 'WHITE', 'BLACK')
class OutputHandler:
    """A handler that hooks up with the error bundler to colorize the
    output of the application for *nix-based terminals.

    Text written through this handler may contain ``<<COLOR>>`` markup
    tags (see :meth:`colorize_text`); they are converted to terminal
    escape sequences when color is available, and stripped otherwise.
    """

    # Matches the <<COLOR>> markup tags. Compiled once at class creation
    # instead of on every write() call; written as a raw string because
    # '\<' / '\>' are invalid escape sequences in a plain string literal.
    _color_pattern = re.compile(r'<<[A-Z]*?>>')

    def __init__(self, buffer=sys.stdout, no_color=False):
        """Initialize the handler.

        buffer -- file-like object that rendered text is written to
                  (defaults to sys.stdout).
        no_color -- force color off. Color is also disabled when curses
                    is unavailable or stdout is not an interactive tty.
        """
        if not curses:
            no_color = True

        if not no_color:
            # Disable color when stdout has been redirected or captured.
            no_color = isinstance(sys.stdout, StringIO) or \
                       not sys.stdout.isatty()
        self.no_color = no_color

        # Don't bother initiating color if there's no color.
        if not no_color:
            # Get curses all ready to write some stuff to the screen.
            curses.setupterm()

            # Initialize a store for the colors and pre-populate it
            # with the un-color color.
            self.colors = {'NORMAL': curses.tigetstr('sgr0') or ''}

            # Determines capabilities of the terminal.
            fgColorSeq = curses.tigetstr('setaf') or \
                         curses.tigetstr('setf') or ''

            # Go through each color and figure out what the sequences
            # are for each, then store the sequences in the store we
            # made above.
            for color in COLORS:
                colorIndex = getattr(curses, 'COLOR_%s' % color)
                self.colors[color] = curses.tparm(fgColorSeq,
                                                  colorIndex)

        self.buffer = buffer

    def colorize_text(self, text):
        """Adds escape sequences to colorize text and make it
        beautiful. To colorize text, prefix the text you want to color
        with the color (capitalized) wrapped in double angle brackets
        (i.e.: <<GREEN>>). End your string with <<NORMAL>>. If you
        don't, it will be done for you (assuming you used a color code
        in your string)."""

        # Take note of where the escape sequences are.
        rnormal = text.rfind('<<NORMAL')
        rany = text.rfind('<<')

        # Put in the escape sequences.
        for color, code in self.colors.items():
            text = text.replace('<<%s>>' % color, code)

        # Make sure that the last sequence is a NORMAL sequence.
        if rany > -1 and rnormal < rany:
            text += self.colors['NORMAL']

        return text

    def write(self, text):
        'Uses curses to print in the fanciest way possible.'

        # Add color to the terminal.
        if not self.no_color:
            text = self.colorize_text(text)
        else:
            # No color available: strip the markup tags entirely.
            text = self._color_pattern.sub('', text)

        text += '\n'

        self.buffer.write(text)
        return self
| bsd-3-clause |
evanma92/routeh | flask/lib/python2.7/site-packages/passlib/utils/handlers.py | 19 | 62539 | """passlib.handler - code for implementing handlers, and global registry for handlers"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import inspect
import re
import hashlib
import logging; log = logging.getLogger(__name__)
import time
import os
from warnings import warn
# site
# pkg
import passlib.exc as exc
from passlib.exc import MissingBackendError, PasslibConfigWarning, \
PasslibHashWarning
from passlib.ifc import PasswordHash
from passlib.registry import get_crypt_handler
from passlib.utils import classproperty, consteq, getrandstr, getrandbytes,\
BASE64_CHARS, HASH64_CHARS, rng, to_native_str, \
is_crypt_handler, to_unicode, \
MAX_PASSWORD_SIZE
from passlib.utils.compat import b, join_byte_values, bytes, irange, u, \
uascii_to_str, join_unicode, unicode, str_to_uascii, \
join_unicode, base_string_types, PY2, int_types
# local
# public API of this module: hash-string parse/render helpers plus the
# mixin framework used to build password hash handlers
__all__ = [
    # helpers for implementing MCF (modular-crypt-format) handlers
    'parse_mc2',
    'parse_mc3',
    'render_mc2',
    'render_mc3',

    # framework for implementing handlers
    'GenericHandler',
    'StaticHandler',
    'HasUserContext',
    'HasRawChecksum',
    'HasManyIdents',
    'HasSalt',
    'HasRawSalt',
    'HasRounds',
    'HasManyBackends',

    # other helpers
    'PrefixWrapper',
]
#=============================================================================
# constants
#=============================================================================

# common salt_chars & checksum_chars values
# (BASE64_CHARS, HASH64_CHARS imported above)
PADDED_BASE64_CHARS = BASE64_CHARS + u("=")   # base64 alphabet plus '=' padding
HEX_CHARS = u("0123456789abcdefABCDEF")       # hex digits, either case
UPPER_HEX_CHARS = u("0123456789ABCDEF")       # hex digits, upper case only
LOWER_HEX_CHARS = u("0123456789abcdef")       # hex digits, lower case only

# special byte string containing all possible byte values
# XXX: treated as singleton by some of the code for efficiency.
ALL_BYTE_VALUES = join_byte_values(irange(256))

# deprecated aliases - will be removed after passlib 1.8
H64_CHARS = HASH64_CHARS
B64_CHARS = BASE64_CHARS
PADDED_B64_CHARS = PADDED_BASE64_CHARS
UC_HEX_CHARS = UPPER_HEX_CHARS
LC_HEX_CHARS = LOWER_HEX_CHARS
#=============================================================================
# support functions
#=============================================================================
def _bitsize(count, chars):
    """Approximate entropy (in bits) of *count* symbols drawn from *chars*.

    Shared helper for the various ``bitsize()`` methods; returns 0 when
    either the count or the alphabet is empty.
    """
    if not (count and chars):
        return 0
    import math
    return int(count * math.log(len(chars), 2))
#=============================================================================
# parsing helpers
#=============================================================================
# unicode singletons reused by the parse_mc2/parse_mc3 helpers below
_UDOLLAR = u("$")  # default field separator in modular-crypt strings
_UZERO = u("0")    # used to detect zero-padded rounds fields
def validate_secret(secret):
    """Check that *secret* is a string of acceptable size.

    :raises exc.ExpectedStringError: if *secret* is not a string type.
    :raises exc.PasswordSizeError: if it exceeds ``MAX_PASSWORD_SIZE``.
    """
    if isinstance(secret, base_string_types):
        if len(secret) > MAX_PASSWORD_SIZE:
            raise exc.PasswordSizeError()
    else:
        raise exc.ExpectedStringError(secret, "secret")
def to_unicode_for_identify(hash):
    """Coerce *hash* to unicode so identify() can inspect it.

    Bytes are decoded as utf-8 when possible, falling back to latin-1
    (which maps every byte, so it cannot fail) -- identify() doesn't
    care about the exact rendering of non-ascii characters.

    :raises exc.ExpectedStringError: if *hash* is not a string type.
    """
    if isinstance(hash, unicode):
        return hash
    if not isinstance(hash, bytes):
        raise exc.ExpectedStringError(hash, "hash")
    try:
        return hash.decode("utf-8")
    except UnicodeDecodeError:
        # foolproof fallback: latin-1 decodes any byte sequence
        return hash.decode("latin-1")
def parse_mc2(hash, prefix, sep=_UDOLLAR, handler=None):
    """split a 2-part modular-crypt-format string into salt + checksum.

    Handles strings of the form :samp:`{prefix}{salt}[${checksum}]`
    (as used by e.g. md5_crypt); a string without the checksum field
    is treated as a configuration string.

    :arg hash: hash or config string (unicode, or ascii-encoded bytes)
    :arg prefix: identifying prefix (unicode)
    :param sep: field separator (unicode, defaults to ``$``)
    :param handler: handler class, forwarded to error constructors
    :raises exc.InvalidHashError: if *hash* doesn't start with *prefix*
    :raises exc.MalformedHashError: if the remainder has too many fields
    :returns: a ``(salt, chk | None)`` tuple
    """
    # detect prefix
    hash = to_unicode(hash, "ascii", "hash")
    assert isinstance(prefix, unicode)
    if not hash.startswith(prefix):
        raise exc.InvalidHashError(handler)

    # split remainder into fields
    assert isinstance(sep, unicode)
    fields = hash[len(prefix):].split(sep)
    count = len(fields)
    if count == 1:
        # config string -- no checksum portion present
        return fields[0], None
    if count == 2:
        # full hash -- normalize empty checksum to None
        return fields[0], fields[1] or None
    raise exc.MalformedHashError(handler)
def parse_mc3(hash, prefix, sep=_UDOLLAR, rounds_base=10,
              default_rounds=None, handler=None):
    """split a 3-part modular-crypt-format string into rounds/salt/checksum.

    Handles strings of the form
    :samp:`{prefix}[{rounds}]${salt}[${checksum}]` (as used by e.g.
    sha1_crypt).  The rounds field is converted to an integer; a
    zero-padded rounds value is rejected.

    :arg hash: hash or config string (unicode, or ascii-encoded bytes)
    :arg prefix: identifying prefix (unicode)
    :param sep: field separator (unicode, defaults to ``$``)
    :param rounds_base: numeric base of the rounds field (default 10)
    :param default_rounds:
        value returned when the rounds field is empty; when ``None``
        (the default) an empty rounds field is an error.
    :param handler: handler class, forwarded to error constructors
    :raises exc.InvalidHashError: if *hash* doesn't start with *prefix*
    :raises exc.MalformedHashError:
        wrong number of fields, or empty rounds field without a default
    :raises exc.ZeroPaddedRoundsError: if the rounds field is zero-padded
    :returns: a ``(rounds : int, salt, chk | None)`` tuple
    """
    # detect prefix
    hash = to_unicode(hash, "ascii", "hash")
    assert isinstance(prefix, unicode)
    if not hash.startswith(prefix):
        raise exc.InvalidHashError(handler)

    # split remainder into fields
    assert isinstance(sep, unicode)
    fields = hash[len(prefix):].split(sep)
    count = len(fields)
    if count == 3:
        rounds, salt, chk = fields
    elif count == 2:
        rounds, salt = fields
        chk = None
    else:
        raise exc.MalformedHashError(handler)

    # convert rounds field to an integer, rejecting zero-padding
    if rounds.startswith(_UZERO) and rounds != _UZERO:
        raise exc.ZeroPaddedRoundsError(handler)
    if rounds:
        rounds = int(rounds, rounds_base)
    elif default_rounds is None:
        raise exc.MalformedHashError(handler, "empty rounds field")
    else:
        rounds = default_rounds

    return rounds, salt, chk or None
#=============================================================================
# formatting helpers
#=============================================================================
def render_mc2(ident, salt, checksum, sep=u("$")):
    """assemble a 2-part modular-crypt string; inverse of parse_mc2().

    Produces a native string of the form
    :samp:`{ident}{salt}[${checksum}]` (as used by e.g. md5_crypt);
    when *checksum* is empty/None, a config string without the trailing
    checksum field is returned.

    :arg ident: identifier prefix (unicode)
    :arg salt: encoded salt (unicode)
    :arg checksum: encoded checksum (unicode or None)
    :param sep: separator char (unicode, defaults to ``$``)
    :returns: config or hash string (native str)
    """
    parts = [ident, salt]
    if checksum:
        parts.extend([sep, checksum])
    return uascii_to_str(join_unicode(parts))
def render_mc3(ident, rounds, salt, checksum, sep=u("$"), rounds_base=10):
    """assemble a 3-part modular-crypt string; inverse of parse_mc3().

    Produces a native string of the form
    :samp:`{ident}[{rounds}$]{salt}[${checksum}]` (as used by e.g.
    sha1_crypt); when *checksum* is empty/None, a config string without
    the trailing checksum field is returned.

    :arg ident: identifier prefix (unicode)
    :arg rounds: rounds value (int, or None to render an empty field)
    :arg salt: encoded salt (unicode)
    :arg checksum: encoded checksum (unicode or None)
    :param sep: separator char (unicode, defaults to ``$``)
    :param rounds_base: base used to encode the rounds value (10 or 16)
    :returns: config or hash string (native str)
    """
    # render rounds portion
    if rounds is None:
        rounds_str = u('')
    elif rounds_base == 16:
        rounds_str = u("%x") % rounds
    else:
        assert rounds_base == 10
        rounds_str = unicode(rounds)

    parts = [ident, rounds_str, sep, salt]
    if checksum:
        parts.extend([sep, checksum])
    return uascii_to_str(join_unicode(parts))
#=============================================================================
# GenericHandler
#=============================================================================
class GenericHandler(PasswordHash):
"""helper class for implementing hash handlers.
GenericHandler-derived classes will have (at least) the following
constructor options, though others may be added by mixins
and by the class itself:
:param checksum:
this should contain the digest portion of a
parsed hash (mainly provided when the constructor is called
by :meth:`from_string()`).
defaults to ``None``.
:param use_defaults:
If ``False`` (the default), a :exc:`TypeError` should be thrown
if any settings required by the handler were not explicitly provided.
If ``True``, the handler should attempt to provide a default for any
missing values. This means generate missing salts, fill in default
cost parameters, etc.
This is typically only set to ``True`` when the constructor
is called by :meth:`encrypt`, allowing user-provided values
to be handled in a more permissive manner.
:param relaxed:
If ``False`` (the default), a :exc:`ValueError` should be thrown
if any settings are out of bounds or otherwise invalid.
If ``True``, they should be corrected if possible, and a warning
issue. If not possible, only then should an error be raised.
(e.g. under ``relaxed=True``, rounds values will be clamped
to min/max rounds).
This is mainly used when parsing the config strings of certain
hashes, whose specifications implementations to be tolerant
of incorrect values in salt strings.
Class Attributes
================
.. attribute:: ident
[optional]
If this attribute is filled in, the default :meth:`identify` method will use
it as a identifying prefix that can be used to recognize instances of this handler's
hash. Filling this out is recommended for speed.
This should be a unicode str.
.. attribute:: _hash_regex
[optional]
If this attribute is filled in, the default :meth:`identify` method
will use it to recognize instances of the hash. If :attr:`ident`
is specified, this will be ignored.
This should be a unique regex object.
.. attribute:: checksum_size
[optional]
Specifies the number of characters that should be expected in the checksum string.
If omitted, no check will be performed.
.. attribute:: checksum_chars
[optional]
A string listing all the characters allowed in the checksum string.
If omitted, no check will be performed.
This should be a unicode str.
.. attribute:: _stub_checksum
[optional]
If specified, hashes with this checksum will have their checksum
normalized to ``None``, treating it like a config string.
This is mainly used by hash formats which don't have a concept
of a config string, so a unlikely-to-occur checksum (e.g. all zeros)
is used by some implementations.
This should be a string of the same datatype as :attr:`checksum`,
or ``None``.
Instance Attributes
===================
.. attribute:: checksum
The checksum string provided to the constructor (after passing it
through :meth:`_norm_checksum`).
Required Subclass Methods
=========================
The following methods must be provided by handler subclass:
.. automethod:: from_string
.. automethod:: to_string
.. automethod:: _calc_checksum
Default Methods
===============
The following methods have default implementations that should work for
most cases, though they may be overridden if the hash subclass needs to:
.. automethod:: _norm_checksum
.. automethod:: genconfig
.. automethod:: genhash
.. automethod:: identify
.. automethod:: encrypt
.. automethod:: verify
"""
#===================================================================
# class attr
#===================================================================
# this must be provided by the actual class.
setting_kwds = None
# providing default since most classes don't use this at all.
context_kwds = ()
# optional prefix that uniquely identifies hash
ident = None
# optional regexp for recognizing hashes,
# used by default identify() if .ident isn't specified.
_hash_regex = None
# if specified, _norm_checksum will require this length
checksum_size = None
# if specified, _norm_checksum() will validate this
checksum_chars = None
# if specified, hashes with this checksum will be treated
# as if no checksum was specified.
_stub_checksum = None
# private flag used by HasRawChecksum
_checksum_is_bytes = False
#===================================================================
# instance attrs
#===================================================================
checksum = None # stores checksum
# use_defaults = False # whether _norm_xxx() funcs should fill in defaults.
# relaxed = False # when _norm_xxx() funcs should be strict about inputs
#===================================================================
# init
#===================================================================
def __init__(self, checksum=None, use_defaults=False, relaxed=False,
             **kwds):
    # NOTE: the use_defaults/relaxed flags must be assigned *before*
    # super().__init__() and _norm_checksum() run, since the _norm_xxx()
    # helpers in this class and its mixins consult them.
    self.use_defaults = use_defaults
    self.relaxed = relaxed
    super(GenericHandler, self).__init__(**kwds)
    self.checksum = self._norm_checksum(checksum)
def _norm_checksum(self, checksum):
    """validates checksum keyword against class requirements,
    returns normalized version of checksum.

    :raises exc.ExpectedTypeError: if checksum has the wrong type
    :raises exc.ChecksumSizeError: if :attr:`checksum_size` is set and doesn't match
    :raises ValueError: if checksum contains chars outside :attr:`checksum_chars`
    """
    # NOTE: by default this code assumes checksum should be unicode.
    # For classes where the checksum is raw bytes, the HasRawChecksum sets
    # the _checksum_is_bytes flag which alters various code paths below.
    if checksum is None:
        return None
    # normalize to bytes / unicode
    raw = self._checksum_is_bytes
    if raw:
        # NOTE: no clear route to reasonably convert unicode -> raw bytes,
        # so relaxed does nothing here
        if not isinstance(checksum, bytes):
            raise exc.ExpectedTypeError(checksum, "bytes", "checksum")
    elif not isinstance(checksum, unicode):
        # under relaxed mode, accept ascii bytes (with a warning);
        # otherwise require unicode.
        if isinstance(checksum, bytes) and self.relaxed:
            warn("checksum should be unicode, not bytes",
                 PasslibHashWarning)
            checksum = checksum.decode("ascii")
        else:
            raise exc.ExpectedTypeError(checksum, "unicode", "checksum")
    # handle stub -- a checksum equal to the stub value is treated as absent
    if checksum == self._stub_checksum:
        return None
    # check size
    cc = self.checksum_size
    if cc and len(checksum) != cc:
        raise exc.ChecksumSizeError(self, raw=raw)
    # check charset (only meaningful for unicode checksums)
    if not raw:
        cs = self.checksum_chars
        if cs and any(c not in cs for c in checksum):
            raise ValueError("invalid characters in %s checksum" %
                             (self.name,))
    return checksum
#===================================================================
# password hash api - formatting interface
#===================================================================
@classmethod
def identify(cls, hash):
    """check whether the input looks like one of this handler's hashes;
    returns True/False, never raises for unparseable input."""
    # NOTE: subclasses may wish to use faster / simpler identify,
    # and raise value errors only when an invalid (but identifiable)
    # string is parsed
    hash = to_unicode_for_identify(hash)
    if not hash:
        return False
    # fast path 1: class declares a known unique prefix
    if cls.ident is not None:
        return hash.startswith(cls.ident)
    # fast path 2: class provides a recognition regexp
    if cls._hash_regex is not None:
        return cls._hash_regex.match(hash) is not None
    # slow fallback: attempt a full parse, report whether it succeeded.
    try:
        cls.from_string(hash)
    except ValueError:
        return False
    return True
@classmethod
def from_string(cls, hash, **context): # pragma: no cover
    """return parsed instance from hash/configuration string

    :param \*\*context:
        context keywords to pass to constructor (if applicable).
    :raises ValueError: if hash is incorrectly formatted
    :raises NotImplementedError: if subclass failed to provide an implementation
    :returns:
        hash parsed into components,
        for formatting / calculating checksum.
    """
    raise NotImplementedError("%s must implement from_string()" % (cls,))
def to_string(self): # pragma: no cover
    """render instance to hash or configuration string

    :raises NotImplementedError: if subclass failed to provide an implementation

    :returns:
        if :attr:`checksum` is set, should return full hash string.
        if not, should either return abbreviated configuration string,
        or fill in a stub checksum.

        should return native string type (ascii-bytes under python 2,
        unicode under python 3)
    """
    # NOTE: documenting some non-standardized but common kwd flags
    # that passlib to_string() method may have:
    #
    # withchk=True -- if false, omit checksum portion of hash
    #
    # BUGFIX: error message previously named from_string(); it now
    # correctly reports to_string() as the missing method.
    raise NotImplementedError("%s must implement to_string()" %
                              (self.__class__,))

##def to_config_string(self):
##    "helper for generating configuration string (ignoring hash)"
##    orig = self.checksum
##    try:
##        self.checksum = None
##        return self.to_string()
##    finally:
##        self.checksum = orig
#===================================================================
#'crypt-style' interface (default implementation)
#===================================================================
@classmethod
def genconfig(cls, **settings):
    """default genconfig: build an instance from the provided settings
    (filling in defaults), and render it as a configuration string."""
    stub = cls(use_defaults=True, **settings)
    return stub.to_string()
@classmethod
def genhash(cls, secret, config, **context):
    """default genhash: parse config string into an instance,
    calculate checksum of secret, and re-render as full hash string."""
    validate_secret(secret)
    self = cls.from_string(config, **context)
    self.checksum = self._calc_checksum(secret)
    return self.to_string()
def _calc_checksum(self, secret): # pragma: no cover
    """given secret; calculate and return encoded checksum portion of hash
    string, taking config from object state

    calc checksum implementations may assume secret is always
    either unicode or bytes, checks are performed by verify/etc.

    :raises NotImplementedError: if subclass failed to provide an implementation
    """
    raise NotImplementedError("%s must implement _calc_checksum()" %
                              (self.__class__,))
#===================================================================
#'application' interface (default implementation)
#===================================================================
@classmethod
def encrypt(cls, secret, **kwds):
    """default encrypt: validate secret, build instance from keywords
    (filling in defaults such as a new salt), calculate checksum,
    and render the resulting hash string."""
    validate_secret(secret)
    hasher = cls(use_defaults=True, **kwds)
    hasher.checksum = hasher._calc_checksum(secret)
    return hasher.to_string()
@classmethod
def verify(cls, secret, hash, **context):
    """default verify: parse hash, recompute checksum from secret,
    and compare the two in constant time."""
    # NOTE: classes with multiple checksum encodings should either
    # override this method, or ensure that from_string() / _norm_checksum()
    # ensures .checksum always uses a single canonical representation.
    validate_secret(secret)
    parsed = cls.from_string(hash, **context)
    if parsed.checksum is None:
        # can't verify against a bare configuration string
        raise exc.MissingDigestError(cls)
    return consteq(parsed._calc_checksum(secret), parsed.checksum)
#===================================================================
# experimental - the following methods are not finished or tested,
# but may work correctly for some hashes
#===================================================================

# settings which parsehash() should never report
_unparsed_settings = ("salt_size", "relaxed")
# settings which parsehash() should obscure when sanitize=True
_unsafe_settings = ("salt", "checksum")

@classproperty
def _parsed_settings(cls):
    # the subset of setting_kwds that parsehash() reports
    return (key for key in cls.setting_kwds
            if key not in cls._unparsed_settings)
@staticmethod
def _sanitize(value, char=u("*")):
    "default method to obscure sensitive fields"
    if value is None:
        return None
    if isinstance(value, bytes):
        # render raw bytes via adapted base64 before masking
        from passlib.utils import ab64_encode
        value = ab64_encode(value).decode("ascii")
    elif not isinstance(value, unicode):
        value = unicode(value)
    # keep at most the first 4 chars (fewer for short values),
    # replace the remainder with the mask character
    size = len(value)
    clip = min(4, size//8)
    return value[:clip] + char * (size-clip)
@classmethod
def parsehash(cls, hash, checksum=True, sanitize=False):
    """[experimental method] parse hash into dictionary of settings.

    this essentially acts as the inverse of :meth:`encrypt`: for most
    cases, if ``hash = cls.encrypt(secret, **opts)``, then
    ``cls.parsehash(hash)`` will return a dict matching the original options
    (with the extra keyword *checksum*).

    this method may not work correctly for all hashes,
    and may not be available on some few. its interface may
    change in future releases, if it's kept around at all.

    :arg hash: hash to parse
    :param checksum: include checksum keyword? (defaults to True)
    :param sanitize: mask data for sensitive fields? (defaults to False)
    """
    # FIXME: this may not work for hashes with non-standard settings.
    # XXX: how should this handle checksum/salt encoding?
    # need to work that out for encrypt anyways.
    self = cls.from_string(hash)
    # XXX: could split next few lines out as self._parsehash() for subclassing
    # XXX: could try to resolve ident/variant to publically suitable alias.
    # report only the settings which differ from the class-level defaults
    UNSET = object()
    kwds = dict((key, getattr(self, key)) for key in self._parsed_settings
                if getattr(self, key) != getattr(cls, key, UNSET))
    if checksum and self.checksum is not None:
        kwds['checksum'] = self.checksum
    if sanitize:
        # sanitize=True selects the default masking helper;
        # a callable may be passed in to customize masking.
        if sanitize is True:
            sanitize = cls._sanitize
        for key in cls._unsafe_settings:
            if key in kwds:
                kwds[key] = sanitize(kwds[key])
    return kwds
@classmethod
def bitsize(cls, **kwds):
    "[experimental method] return info about bitsizes of hash"
    try:
        info = super(GenericHandler, cls).bitsize(**kwds)
    except AttributeError:
        # no parent class provides bitsize(); start with an empty report
        info = {}
    cc = ALL_BYTE_VALUES if cls._checksum_is_bytes else cls.checksum_chars
    if cls.checksum_size and cc:
        # FIXME: this may overestimate size due to padding bits (e.g. bcrypt)
        # FIXME: this will be off by 1 for case-insensitive hashes.
        info['checksum'] = _bitsize(cls.checksum_size, cc)
    return info

#===================================================================
# eoc
#===================================================================
class StaticHandler(GenericHandler):
    """GenericHandler mixin for classes which have no settings.

    This mixin assumes the entirety of the hash is stored in the
    :attr:`checksum` attribute; that the hash has no rounds, salt,
    etc. This class provides the following:

    * a default :meth:`genconfig` that always returns None.
    * a default :meth:`from_string` and :meth:`to_string`
      that store the entire hash within :attr:`checksum`,
      after optionally stripping a constant prefix.

    All that is required by subclasses is an implementation of
    the :meth:`_calc_checksum` method.
    """
    # TODO: document _norm_hash()

    # hashes of this type accept no settings at all
    setting_kwds = ()

    # optional constant prefix subclasses can specify
    _hash_prefix = u("")

    @classmethod
    def from_string(cls, hash, **context):
        # default from_string() which strips optional prefix,
        # and passes rest unchanged as checksum value.
        hash = to_unicode(hash, "ascii", "hash")
        hash = cls._norm_hash(hash)
        # could enable this for extra strictness
        ##pat = cls._hash_regex
        ##if pat and pat.match(hash) is None:
        ##    raise ValueError("not a valid %s hash" % (cls.name,))
        prefix = cls._hash_prefix
        if prefix:
            if hash.startswith(prefix):
                hash = hash[len(prefix):]
            else:
                raise exc.InvalidHashError(cls)
        return cls(checksum=hash, **context)

    @classmethod
    def _norm_hash(cls, hash):
        "helper for subclasses to normalize case if needed"
        return hash

    def to_string(self):
        # render hash as prefix + checksum (native str type)
        assert self.checksum is not None
        return uascii_to_str(self._hash_prefix + self.checksum)

    @classmethod
    def genconfig(cls):
        # since it has no settings, there's no need for a config string.
        return None

    @classmethod
    def genhash(cls, secret, config, **context):
        # since it has no settings, just verify config, and call encrypt()
        if config is not None and not cls.identify(config):
            raise exc.InvalidHashError(cls)
        return cls.encrypt(secret, **context)

    # per-subclass: stores dynamically created subclass used by _calc_checksum() stub
    __cc_compat_hack = None

    def _calc_checksum(self, secret):
        """given secret; calculate and return encoded checksum portion of hash
        string, taking config from object state
        """
        # NOTE: prior to 1.6, StaticHandler required classes implement genhash
        # instead of this method. so if we reach here, we try calling genhash.
        # if that succeeds, we issue deprecation warning. if it fails,
        # we'll just recurse back to here, but in a different instance.
        # so before we call genhash, we create a subclass which handles
        # throwing the NotImplementedError.
        cls = self.__class__
        assert cls.__module__ != __name__
        wrapper_cls = cls.__cc_compat_hack
        if wrapper_cls is None:
            # build (and cache) a subclass whose _calc_checksum raises,
            # so a legacy genhash that delegates back here fails cleanly
            # instead of recursing forever.
            def inner(self, secret):
                raise NotImplementedError("%s must implement _calc_checksum()" %
                                          (cls,))
            wrapper_cls = cls.__cc_compat_hack = type(cls.__name__ + "_wrapper",
                (cls,), dict(_calc_checksum=inner, __module__=cls.__module__))
        context = dict((k,getattr(self,k)) for k in self.context_kwds)
        hash = wrapper_cls.genhash(secret, None, **context)
        warn("%r should be updated to implement StaticHandler._calc_checksum() "
             "instead of StaticHandler.genhash(), support for the latter "
             "style will be removed in Passlib 1.8" % (cls),
             DeprecationWarning)
        return str_to_uascii(hash)
#=============================================================================
# GenericHandler mixin classes
#=============================================================================
class HasEncodingContext(GenericHandler):
    """helper for classes which require knowledge of the encoding used;
    adds an ``encoding`` context keyword, falling back to
    :attr:`default_encoding` when none is given."""
    context_kwds = ("encoding",)
    default_encoding = "utf-8"

    def __init__(self, encoding=None, **kwds):
        super(HasEncodingContext, self).__init__(**kwds)
        # falsy encoding (None / empty string) falls back to the default
        self.encoding = encoding if encoding else self.default_encoding
class HasUserContext(GenericHandler):
    """helper for classes which require a user context keyword"""
    context_kwds = ("user",)

    def __init__(self, user=None, **kwds):
        super(HasUserContext, self).__init__(**kwds)
        self.user = user

    # XXX: would like to validate user input here, but calls to from_string()
    # which lack context keywords would then fail; so leaving code per-handler.

    # wrap funcs to accept 'user' as positional arg for ease of use.
    @classmethod
    def encrypt(cls, secret, user=None, **context):
        return super(HasUserContext, cls).encrypt(secret, user=user, **context)

    @classmethod
    def verify(cls, secret, hash, user=None, **context):
        return super(HasUserContext, cls).verify(secret, hash, user=user, **context)

    @classmethod
    def genhash(cls, secret, config, user=None, **context):
        return super(HasUserContext, cls).genhash(secret, config, user=user, **context)

    # XXX: how to guess the entropy of a username?
    # most of these hashes are for a system (e.g. Oracle)
    # which has a few *very common* names and thus really low entropy;
    # while the rest are slightly less predictable.
    # need to find good reference about this.
    ##@classmethod
    ##def bitsize(cls, **kwds):
    ##    info = super(HasUserContext, cls).bitsize(**kwds)
    ##    info['user'] = xxx
    ##    return info
#------------------------------------------------------------------------
# checksum mixins
#------------------------------------------------------------------------
class HasRawChecksum(GenericHandler):
    """mixin for classes which work with decoded checksum bytes

    .. todo::
        document this class's usage
    """
    # NOTE: GenericHandler.checksum_chars is ignored by this implementation.

    # NOTE: all HasRawChecksum code is currently part of GenericHandler,
    # using private '_checksum_is_bytes' flag.
    # this arrangement may be changed in the future.
    _checksum_is_bytes = True
#------------------------------------------------------------------------
# ident mixins
#------------------------------------------------------------------------
class HasManyIdents(GenericHandler):
    """mixin for hashes which use multiple prefix identifiers

    For the hashes which may use multiple identifier prefixes,
    this mixin adds an ``ident`` keyword to constructor.
    Any value provided is passed through the :meth:`norm_idents` method,
    which takes care of validating the identifier,
    as well as allowing aliases for easier specification
    of the identifiers by the user.

    .. todo::
        document this class's usage
    """
    #===================================================================
    # class attrs
    #===================================================================
    default_ident = None # should be unicode
    ident_values = None # should be list of unicode strings
    ident_aliases = None # should be dict of unicode -> unicode
    # NOTE: any aliases provided to norm_ident() as bytes
    # will have been converted to unicode before
    # comparing against this dictionary.

    # NOTE: relying on test_06_HasManyIdents() to verify
    # these are configured correctly.

    #===================================================================
    # instance attrs
    #===================================================================
    # the (normalized) ident prefix used by this instance
    ident = None

    #===================================================================
    # init
    #===================================================================
    def __init__(self, ident=None, **kwds):
        super(HasManyIdents, self).__init__(**kwds)
        self.ident = self._norm_ident(ident)

    def _norm_ident(self, ident):
        """validate ident keyword, resolving aliases and filling in
        the class default when permitted.

        :raises TypeError: if no ident provided and ``use_defaults=False``
        :raises ValueError: if ident isn't recognized by this class
        """
        # fill in default identifier
        if ident is None:
            if not self.use_defaults:
                raise TypeError("no ident specified")
            ident = self.default_ident
            assert ident is not None, "class must define default_ident"
        # handle unicode
        if isinstance(ident, bytes):
            ident = ident.decode('ascii')
        # check if identifier is valid
        iv = self.ident_values
        if ident in iv:
            return ident
        # resolve aliases, and recheck against ident_values
        ia = self.ident_aliases
        if ia:
            try:
                value = ia[ident]
            except KeyError:
                pass
            else:
                if value in iv:
                    return value
        # failure!
        raise ValueError("invalid ident: %r" % (ident,))

    #===================================================================
    # password hash api
    #===================================================================
    @classmethod
    def identify(cls, hash):
        # hash is recognized iff it starts with one of the known prefixes
        hash = to_unicode_for_identify(hash)
        return any(hash.startswith(ident) for ident in cls.ident_values)

    @classmethod
    def _parse_ident(cls, hash):
        """extract ident prefix from hash, helper for subclasses' from_string()"""
        hash = to_unicode(hash, "ascii", "hash")
        for ident in cls.ident_values:
            if hash.startswith(ident):
                return ident, hash[len(ident):]
        raise exc.InvalidHashError(cls)
    #===================================================================
    # eoc
    #===================================================================
#------------------------------------------------------------------------
# salt mixins
#------------------------------------------------------------------------
class HasSalt(GenericHandler):
    """mixin for validating salts.

    This :class:`GenericHandler` mixin adds a ``salt`` keyword to the class constructor;
    any value provided is passed through the :meth:`_norm_salt` method,
    which takes care of validating salt length and content,
    as well as generating new salts if one is not provided.

    :param salt:
        optional salt string

    :param salt_size:
        optional size of salt (only used if no salt provided);
        defaults to :attr:`default_salt_size`.

    Class Attributes
    ================
    In order for :meth:`!_norm_salt` to do its job, the following
    attributes should be provided by the handler subclass:

    .. attribute:: min_salt_size

        The minimum number of characters allowed in a salt string.
        A :exc:`ValueError` will be thrown if the provided salt is too small.
        Defaults to ``None``, for no minimum.

    .. attribute:: max_salt_size

        The maximum number of characters allowed in a salt string.
        By default a :exc:`ValueError` will be thrown if the provided salt is
        too large; but if ``relaxed=True``, it will be clipped and a warning
        issued instead. Defaults to ``None``, for no maximum.

    .. attribute:: default_salt_size

        [required]
        If no salt is provided, this should specify the size of the salt
        that will be generated by :meth:`_generate_salt`. By default
        this will fall back to :attr:`max_salt_size`.

    .. attribute:: salt_chars

        A string containing all the characters which are allowed in the salt
        string. A :exc:`ValueError` will be thrown if any other characters
        are encountered. May be set to ``None`` to skip this check (but see
        in :attr:`default_salt_chars`).

    .. attribute:: default_salt_chars

        [required]
        This attribute controls the set of characters used to generate
        *new* salt strings. By default, it mirrors :attr:`salt_chars`.
        If :attr:`!salt_chars` is ``None``, this attribute must be specified
        in order to generate new salts. Aside from that purpose,
        the main use of this attribute is for hashes which wish to generate
        salts from a restricted subset of :attr:`!salt_chars`; such as
        accepting all characters, but only using a-z.

    Instance Attributes
    ===================
    .. attribute:: salt

        This instance attribute will be filled in with the salt provided
        to the constructor (as adapted by :meth:`_norm_salt`)

    Subclassable Methods
    ====================
    .. automethod:: _norm_salt
    .. automethod:: _generate_salt
    """
    # TODO: document _truncate_salt()
    # XXX: allow providing raw salt to this class, and encoding it?

    #===================================================================
    # class attrs
    #===================================================================
    min_salt_size = None
    max_salt_size = None
    salt_chars = None

    @classproperty
    def default_salt_size(cls):
        "default salt size (defaults to *max_salt_size*)"
        return cls.max_salt_size

    @classproperty
    def default_salt_chars(cls):
        "charset used to generate new salt strings (defaults to *salt_chars*)"
        return cls.salt_chars

    # private helpers for HasRawSalt, shouldn't be used by subclasses
    _salt_is_bytes = False
    _salt_unit = "chars"

    #===================================================================
    # instance attrs
    #===================================================================
    # the (normalized or generated) salt used by this instance
    salt = None

    #===================================================================
    # init
    #===================================================================
    def __init__(self, salt=None, salt_size=None, **kwds):
        super(HasSalt, self).__init__(**kwds)
        self.salt = self._norm_salt(salt, salt_size=salt_size)

    def _norm_salt(self, salt, salt_size=None):
        """helper to normalize & validate user-provided salt string

        If no salt provided, a random salt is generated
        using :attr:`default_salt_size` and :attr:`default_salt_chars`.

        :arg salt: salt string or ``None``
        :param salt_size: optionally specified size of autogenerated salt

        :raises TypeError:
            If salt not provided and ``use_defaults=False``.

        :raises ValueError:
            * if salt contains chars that aren't in :attr:`salt_chars`.
            * if salt contains less than :attr:`min_salt_size` characters.
            * if ``relaxed=False`` and salt has more than :attr:`max_salt_size`
              characters (if ``relaxed=True``, the salt is truncated
              and a warning is issued instead).

        :returns:
            normalized or generated salt
        """
        # generate new salt if none provided
        if salt is None:
            if not self.use_defaults:
                raise TypeError("no salt specified")
            if salt_size is None:
                salt_size = self.default_salt_size
            salt = self._generate_salt(salt_size)
        # check type
        if self._salt_is_bytes:
            if not isinstance(salt, bytes):
                raise exc.ExpectedTypeError(salt, "bytes", "salt")
        else:
            if not isinstance(salt, unicode):
                # NOTE: allowing bytes under py2 so salt can be native str.
                if isinstance(salt, bytes) and (PY2 or self.relaxed):
                    salt = salt.decode("ascii")
                else:
                    raise exc.ExpectedTypeError(salt, "unicode", "salt")
        # check charset
        sc = self.salt_chars
        if sc is not None and any(c not in sc for c in salt):
            raise ValueError("invalid characters in %s salt" % self.name)
        # check min size
        mn = self.min_salt_size
        if mn and len(salt) < mn:
            msg = "salt too small (%s requires %s %d %s)" % (self.name,
                  "exactly" if mn == self.max_salt_size else ">=", mn,
                  self._salt_unit)
            raise ValueError(msg)
        # check max size; under relaxed mode, clip with warning instead
        mx = self.max_salt_size
        if mx and len(salt) > mx:
            msg = "salt too large (%s requires %s %d %s)" % (self.name,
                  "exactly" if mx == mn else "<=", mx, self._salt_unit)
            if self.relaxed:
                warn(msg, PasslibHashWarning)
                salt = self._truncate_salt(salt, mx)
            else:
                raise ValueError(msg)
        return salt

    @staticmethod
    def _truncate_salt(salt, mx):
        # NOTE: some hashes (e.g. bcrypt) have structure within their
        # salt string. this provides a method to override to perform
        # the truncation properly
        return salt[:mx]

    def _generate_salt(self, salt_size):
        """helper method for _norm_salt(); generates a new random salt string.

        :arg salt_size: salt size to generate
        """
        return getrandstr(rng, self.default_salt_chars, salt_size)

    @classmethod
    def bitsize(cls, salt_size=None, **kwds):
        "[experimental method] return info about bitsizes of hash"
        info = super(HasSalt, cls).bitsize(**kwds)
        if salt_size is None:
            salt_size = cls.default_salt_size
        # FIXME: this may overestimate size due to padding bits
        # FIXME: this will be off by 1 for case-insensitive hashes.
        info['salt'] = _bitsize(salt_size, cls.default_salt_chars)
        return info
    #===================================================================
    # eoc
    #===================================================================
class HasRawSalt(HasSalt):
    """mixin for classes which use decoded salt parameter

    A variant of :class:`!HasSalt` which takes in decoded bytes instead of an encoded string.

    .. todo::
        document this class's usage
    """
    salt_chars = ALL_BYTE_VALUES

    # NOTE: all HasRawSalt code is currently part of HasSalt, using private
    # '_salt_is_bytes' flag. this arrangement may be changed in the future.
    _salt_is_bytes = True
    _salt_unit = "bytes"

    def _generate_salt(self, salt_size):
        # raw salts are random bytes; restricted salt charsets unsupported here
        assert self.salt_chars in [None, ALL_BYTE_VALUES]
        return getrandbytes(rng, salt_size)
#------------------------------------------------------------------------
# rounds mixin
#------------------------------------------------------------------------
class HasRounds(GenericHandler):
    """mixin for validating rounds parameter

    This :class:`GenericHandler` mixin adds a ``rounds`` keyword to the class
    constructor; any value provided is passed through the :meth:`_norm_rounds`
    method, which takes care of validating the number of rounds.

    :param rounds: optional number of rounds hash should use

    Class Attributes
    ================
    In order for :meth:`!_norm_rounds` to do its job, the following
    attributes must be provided by the handler subclass:

    .. attribute:: min_rounds

        The minimum number of rounds allowed. A :exc:`ValueError` will be
        thrown if the rounds value is too small. Defaults to ``0``.

    .. attribute:: max_rounds

        The maximum number of rounds allowed. A :exc:`ValueError` will be
        thrown if the rounds value is larger than this. Defaults to ``None``
        which indicates no limit to the rounds value.

    .. attribute:: default_rounds

        If no rounds value is provided to constructor, this value will be used.
        If this is not specified, a rounds value *must* be specified by the
        application.

    .. attribute:: rounds_cost

        [required]
        The ``rounds`` parameter typically encodes a cpu-time cost
        for calculating a hash. This should be set to ``"linear"``
        (the default) or ``"log2"``, depending on how the rounds value relates
        to the actual amount of time that will be required.

    Instance Attributes
    ===================
    .. attribute:: rounds

        This instance attribute will be filled in with the rounds value provided
        to the constructor (as adapted by :meth:`_norm_rounds`)

    Subclassable Methods
    ====================
    .. automethod:: _norm_rounds
    """
    #===================================================================
    # class attrs
    #===================================================================
    min_rounds = 0
    max_rounds = None
    default_rounds = None
    rounds_cost = "linear" # default to the common case

    #===================================================================
    # instance attrs
    #===================================================================
    # the (normalized) rounds value used by this instance
    rounds = None

    #===================================================================
    # init
    #===================================================================
    def __init__(self, rounds=None, **kwds):
        super(HasRounds, self).__init__(**kwds)
        self.rounds = self._norm_rounds(rounds)

    def _norm_rounds(self, rounds):
        """helper routine for normalizing rounds

        :arg rounds: ``None``, or integer cost parameter.

        :raises TypeError:
            * if ``use_defaults=False`` and no rounds is specified
            * if rounds is not an integer.

        :raises ValueError:
            * if rounds is ``None`` and class does not specify a value for
              :attr:`default_rounds`.
            * if ``relaxed=False`` and rounds is outside bounds of
              :attr:`min_rounds` and :attr:`max_rounds` (if ``relaxed=True``,
              the rounds value will be clamped, and a warning issued).

        :returns:
            normalized rounds value
        """
        # fill in default
        if rounds is None:
            if not self.use_defaults:
                raise TypeError("no rounds specified")
            rounds = self.default_rounds
            if rounds is None:
                raise TypeError("%s rounds value must be specified explicitly"
                                % (self.name,))
        # check type
        if not isinstance(rounds, int_types):
            raise exc.ExpectedTypeError(rounds, "integer", "rounds")
        # check bounds; under relaxed mode, clamp with warning instead
        mn = self.min_rounds
        if rounds < mn:
            msg = "rounds too low (%s requires >= %d rounds)" % (self.name, mn)
            if self.relaxed:
                warn(msg, PasslibHashWarning)
                rounds = mn
            else:
                raise ValueError(msg)
        mx = self.max_rounds
        if mx and rounds > mx:
            msg = "rounds too high (%s requires <= %d rounds)" % (self.name, mx)
            if self.relaxed:
                warn(msg, PasslibHashWarning)
                rounds = mx
            else:
                raise ValueError(msg)
        return rounds

    @classmethod
    def bitsize(cls, rounds=None, vary_rounds=.1, **kwds):
        "[experimental method] return info about bitsizes of hash"
        info = super(HasRounds, cls).bitsize(**kwds)
        # NOTE: this essentially estimates how many bits of "salt"
        # can be added by varying the rounds value just a little bit.
        if cls.rounds_cost != "log2":
            # assume rounds can be randomized within the range
            # rounds*(1-vary_rounds) ... rounds*(1+vary_rounds)
            # then this can be used to encode
            # log2(rounds*(1+vary_rounds)-rounds*(1-vary_rounds))
            # worth of salt-like bits. this works out to
            # 1+log2(rounds*vary_rounds)
            import math
            if rounds is None:
                rounds = cls.default_rounds
            info['rounds'] = max(0, int(1+math.log(rounds*vary_rounds,2)))
        ## else: # log2 rounds
            # all bits of the rounds value are critical to choosing
            # the time-cost, and can't be randomized.
        return info
    #===================================================================
    # eoc
    #===================================================================
#------------------------------------------------------------------------
# backend mixin & helpers
#------------------------------------------------------------------------
##def _clear_backend(cls):
## "restore HasManyBackend subclass to unloaded state - used by unittests"
## assert issubclass(cls, HasManyBackends) and cls is not HasManyBackends
## if cls._backend:
## del cls._backend
## del cls._calc_checksum
class HasManyBackends(GenericHandler):
"""GenericHandler mixin which provides selecting from multiple backends.
.. todo::
finish documenting this class's usage
For hashes which need to select from multiple backends,
depending on the host environment, this class
offers a way to specify alternate :meth:`_calc_checksum` methods,
and will dynamically chose the best one at runtime.
Backend Methods
---------------
.. automethod:: get_backend
.. automethod:: set_backend
.. automethod:: has_backend
Subclass Hooks
--------------
The following attributes and methods should be filled in by the subclass
which is using :class:`HasManyBackends` as a mixin:
.. attribute:: backends
This attribute should be a tuple containing the names of the backends
which are supported. Two common names are ``"os_crypt"`` (if backend
uses :mod:`crypt`), and ``"builtin"`` (if the backend is a pure-python
fallback).
.. attribute:: _has_backend_{name}
private class attribute checked by :meth:`has_backend` to see if a
specific backend is available, it should be either ``True``
or ``False``. One of these should be provided by
the subclass for each backend listed in :attr:`backends`.
.. classmethod:: _calc_checksum_{name}
private class method that should implement :meth:`_calc_checksum`
for a given backend. it will only be called if the backend has
been selected by :meth:`set_backend`. One of these should be provided
by the subclass for each backend listed in :attr:`backends`.
"""
# NOTE:
# subclass must provide:
# * attr 'backends' containing list of known backends (top priority backend first)
# * attr '_has_backend_xxx' for each backend 'xxx', indicating if backend is available on system
# * attr '_calc_checksum_xxx' for each backend 'xxx', containing calc_checksum implementation using that backend
backends = None # list of backend names, provided by subclass.
_backend = None # holds currently loaded backend (if any) or None
@classmethod
def get_backend(cls):
"""return name of currently active backend.
if no backend has been loaded,
loads and returns name of default backend.
:raises passlib.exc.MissingBackendError: if no backends are available.
:returns: name of active backend
"""
name = cls._backend
if not name:
cls.set_backend()
name = cls._backend
assert name, "set_backend() didn't load any backends"
return name
@classmethod
def has_backend(cls, name="any"):
"""check if support is currently available for specified backend.
:arg name:
name of backend to check for.
defaults to ``"any"``,
but can be any string accepted by :meth:`set_backend`.
:raises ValueError: if backend name is unknown
:returns:
``True`` if backend is currently supported, else ``False``.
"""
if name in ("any", "default"):
if name == "any" and cls._backend:
return True
return any(getattr(cls, "_has_backend_" + name)
for name in cls.backends)
elif name in cls.backends:
return getattr(cls, "_has_backend_" + name)
else:
raise ValueError("unknown backend: %r" % (name,))
@classmethod
def _no_backends_msg(cls):
return "no %s backends available" % (cls.name,)
@classmethod
def set_backend(cls, name="any"):
    """Load the specified backend for future _calc_checksum() calls.

    Replaces :meth:`_calc_checksum_backend` with the implementation
    belonging to the chosen backend.

    :arg name:
        backend to load, defaults to ``"any"``. Accepted values:

        * any string in :attr:`backends`, naming a specific backend.
        * ``"default"`` -- use the preferred backend on this host
          (the first entry in :attr:`backends` that can be loaded).
        * ``"any"`` -- keep the currently loaded backend if there is
          one, else behave like ``"default"``.

    :raises passlib.exc.MissingBackendError:
        * if a specific backend was requested but is unavailable.
        * if ``"any"``/``"default"`` was requested and *no* backends
          are available.

    :returns: name of the backend that was loaded (ignore this value).
    """
    if name == "any":
        loaded = cls._backend
        if loaded:
            return loaded
        name = "default"
    if name == "default":
        # Walk the priority-ordered backend list, taking the first usable one.
        for candidate in cls.backends:
            if cls.has_backend(candidate):
                name = candidate
                break
        else:
            raise exc.MissingBackendError(cls._no_backends_msg())
    elif not cls.has_backend(name):
        raise exc.MissingBackendError("%s backend not available: %r" %
                                      (cls.name, name))
    # Install the backend-specific implementation and remember the choice.
    cls._calc_checksum_backend = getattr(cls, "_calc_checksum_" + name)
    cls._backend = name
    return name
def _calc_checksum_backend(self, secret):
    """Lazy-loading stub for the backend checksum implementation.

    The first call loads the default backend, which replaces this
    attribute with the backend-specific version; the trailing re-invoke
    then dispatches to the real implementation.
    """
    assert not self._backend, "set_backend() failed to replace lazy loader"
    self.set_backend()
    assert self._backend, "set_backend() failed to load a default backend"
    # set_backend() swapped in the real method -- call through it now.
    return self._calc_checksum_backend(secret)
def _calc_checksum(self, secret):
    """Delegate checksum calculation to the currently active backend."""
    return self._calc_checksum_backend(secret)
#=============================================================================
# wrappers
#=============================================================================
class PrefixWrapper(object):
    """Wraps another handler, adding a constant prefix.

    Instances of this class wrap another password hash handler,
    altering the constant prefix that's prepended to the wrapped
    handler's hashes.

    This is used mainly by the :doc:`ldap crypt <passlib.hash.ldap_crypt>` handlers;
    such as :class:`~passlib.hash.ldap_md5_crypt` which wraps :class:`~passlib.hash.md5_crypt` and adds a ``{CRYPT}`` prefix.

    usage::

        myhandler = PrefixWrapper("myhandler", "md5_crypt", prefix="$mh$", orig_prefix="$1$")

    :param name: name to assign to handler
    :param wrapped: handler object or name of registered handler
    :param prefix: identifying prefix to prepend to all hashes
    :param orig_prefix: prefix to strip (defaults to '').
    :param lazy: if True and wrapped handler is specified by name, don't look it up until needed.
    """

    def __init__(self, name, wrapped, prefix=u(''), orig_prefix=u(''), lazy=False,
                 doc=None, ident=None):
        self.name = name
        # Normalize both prefixes to unicode so the slicing/concatenation in
        # _wrap_hash()/_unwrap_hash() never mixes bytes and text.
        if isinstance(prefix, bytes):
            prefix = prefix.decode("ascii")
        self.prefix = prefix
        if isinstance(orig_prefix, bytes):
            orig_prefix = orig_prefix.decode("ascii")
        self.orig_prefix = orig_prefix
        if doc:
            self.__doc__ = doc
        if hasattr(wrapped, "name"):
            # An actual handler object was passed in -- validate it now.
            self._check_handler(wrapped)
            self._wrapped_handler = wrapped
        else:
            # A registry name was passed; resolve it immediately unless lazy.
            self._wrapped_name = wrapped
            if not lazy:
                self._get_wrapped()
        if ident is not None:
            if ident is True:
                # signal that prefix is identifiable in itself.
                if prefix:
                    ident = prefix
                else:
                    raise ValueError("no prefix specified")
            if isinstance(ident, bytes):
                ident = ident.decode("ascii")
            # XXX: what if ident includes parts of wrapped hash's ident?
            # Require ident and prefix to agree on their common leading part.
            if ident[:len(prefix)] != prefix[:len(ident)]:
                raise ValueError("ident must agree with prefix")
            self._ident = ident

    # name of the wrapped handler when resolution is deferred (lazy=True)
    _wrapped_name = None
    # cached wrapped-handler object once resolved
    _wrapped_handler = None

    def _check_handler(self, handler):
        """Warn about wrapped-handler configurations known to be problematic."""
        if 'ident' in handler.setting_kwds and self.orig_prefix:
            # TODO: look into way to fix the issues.
            warn("PrefixWrapper: 'orig_prefix' option may not work correctly "
                 "for handlers which have multiple identifiers: %r" %
                 (handler.name,), exc.PasslibRuntimeWarning)

    def _get_wrapped(self):
        """Return the wrapped handler, resolving it from the registry on first use."""
        handler = self._wrapped_handler
        if handler is None:
            handler = get_crypt_handler(self._wrapped_name)
            self._check_handler(handler)
            self._wrapped_handler = handler
        return handler

    wrapped = property(_get_wrapped)

    # cached 'ident' value; False is the "not computed yet" sentinel
    # (None is a legitimate computed result, so it can't serve as sentinel).
    _ident = False

    @property
    def ident(self):
        """Identifying prefix of hashes, if one can be determined (else None).

        Lazily computed and cached: either the explicit ``ident`` given to
        the constructor, or the wrapped handler's ident run through
        :meth:`_wrap_hash` -- the latter only when no ``orig_prefix`` is set.
        """
        value = self._ident
        if value is False:
            value = None
            # XXX: how will this interact with orig_prefix ?
            # not exposing attrs for now if orig_prefix is set.
            if not self.orig_prefix:
                wrapped = self.wrapped
                ident = getattr(wrapped, "ident", None)
                if ident is not None:
                    value = self._wrap_hash(ident)
            self._ident = value
        return value

    # cached 'ident_values' list; False is the "not computed yet" sentinel
    _ident_values = False

    @property
    def ident_values(self):
        """List of identifying prefixes, if determinable (else None).

        Mirrors :attr:`ident` but for the wrapped handler's ``ident_values``.
        """
        value = self._ident_values
        if value is False:
            value = None
            # XXX: how will this interact with orig_prefix ?
            # not exposing attrs for now if orig_prefix is set.
            if not self.orig_prefix:
                wrapped = self.wrapped
                idents = getattr(wrapped, "ident_values", None)
                if idents:
                    value = [ self._wrap_hash(ident) for ident in idents ]
                ##else:
                ##    ident = self.ident
                ##    if ident is not None:
                ##        value = [ident]
            self._ident_values = value
        return value

    # attrs that should be proxied through to the wrapped handler
    _proxy_attrs = (
                    "setting_kwds", "context_kwds",
                    "default_rounds", "min_rounds", "max_rounds", "rounds_cost",
                    "default_salt_size", "min_salt_size", "max_salt_size",
                    "salt_chars", "default_salt_chars",
                    "backends", "has_backend", "get_backend", "set_backend",
                    )

    def __repr__(self):
        args = [ repr(self._wrapped_name or self._wrapped_handler) ]
        if self.prefix:
            args.append("prefix=%r" % self.prefix)
        if self.orig_prefix:
            args.append("orig_prefix=%r" % self.orig_prefix)
        args = ", ".join(args)
        return 'PrefixWrapper(%r, %s)' % (self.name, args)

    def __dir__(self):
        # Merge class attrs, instance attrs, and whichever proxied attrs
        # the wrapped handler actually provides.
        attrs = set(dir(self.__class__))
        attrs.update(self.__dict__)
        wrapped = self.wrapped
        attrs.update(
            attr for attr in self._proxy_attrs
            if hasattr(wrapped, attr)
        )
        return list(attrs)

    def __getattr__(self, attr):
        """Proxy most attributes from wrapped class (e.g. rounds, salt size, etc)."""
        if attr in self._proxy_attrs:
            return getattr(self.wrapped, attr)
        raise AttributeError("missing attribute: %r" % (attr,))

    def _unwrap_hash(self, hash):
        """Given hash belonging to wrapper, return orig version."""
        # NOTE: assumes hash has been validated as unicode already
        prefix = self.prefix
        if not hash.startswith(prefix):
            raise exc.InvalidHashError(self)
        # NOTE: always passing to handler as unicode, to save reconversion
        return self.orig_prefix + hash[len(prefix):]

    def _wrap_hash(self, hash):
        """Given orig hash; return one belonging to wrapper."""
        # NOTE: should usually be native string.
        # (which does mean extra work under py2, but not py3)
        if isinstance(hash, bytes):
            hash = hash.decode("ascii")
        orig_prefix = self.orig_prefix
        if not hash.startswith(orig_prefix):
            raise exc.InvalidHashError(self.wrapped)
        wrapped = self.prefix + hash[len(orig_prefix):]
        return uascii_to_str(wrapped)

    def identify(self, hash):
        # A hash can only belong to this wrapper if it carries our prefix;
        # delegate the rest of the check to the wrapped handler.
        hash = to_unicode_for_identify(hash)
        if not hash.startswith(self.prefix):
            return False
        hash = self._unwrap_hash(hash)
        return self.wrapped.identify(hash)

    def genconfig(self, **kwds):
        config = self.wrapped.genconfig(**kwds)
        if config is None:
            return None
        else:
            return self._wrap_hash(config)

    def genhash(self, secret, config, **kwds):
        # Strip our prefix before handing config to the wrapped handler,
        # then re-apply it to the result.
        if config is not None:
            config = to_unicode(config, "ascii", "config/hash")
            config = self._unwrap_hash(config)
        return self._wrap_hash(self.wrapped.genhash(secret, config, **kwds))

    def encrypt(self, secret, **kwds):
        return self._wrap_hash(self.wrapped.encrypt(secret, **kwds))

    def verify(self, secret, hash, **kwds):
        hash = to_unicode(hash, "ascii", "hash")
        hash = self._unwrap_hash(hash)
        return self.wrapped.verify(secret, hash, **kwds)
#=============================================================================
# eof
#=============================================================================
| bsd-3-clause |
eerwitt/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/categorical_vocabulary.py | 94 | 4204 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical vocabulary classes to map categories to indexes.
Can be used for categorical variables, sparse variables and words.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
class CategoricalVocabulary(object):
  """Categorical variables vocabulary class.

  Accumulates and provides mapping from classes to indexes.
  Can be easily used for words.
  """

  def __init__(self, unknown_token="<UNK>", support_reverse=True):
    # Id 0 is permanently reserved for the unknown token.
    self._unknown_token = unknown_token
    self._mapping = {unknown_token: 0}
    self._support_reverse = support_reverse
    if support_reverse:
      self._reverse_mapping = [unknown_token]
    self._freq = collections.defaultdict(int)
    self._freeze = False

  def __len__(self):
    """Returns total count of mappings. Including unknown token."""
    return len(self._mapping)

  def freeze(self, freeze=True):
    """Freezes the vocabulary, after which new words return unknown token id.

    Args:
      freeze: True to freeze, False to unfreeze.
    """
    self._freeze = freeze

  def get(self, category):
    """Returns word's id in the vocabulary.

    If category is new, creates a new id for it.

    Args:
      category: string or integer to lookup in vocabulary.

    Returns:
      integer, id in the vocabulary.
    """
    if category not in self._mapping:
      if self._freeze:
        # Frozen vocabulary: unseen categories map to the unknown token.
        return 0
      self._mapping[category] = len(self._mapping)
      if self._support_reverse:
        self._reverse_mapping.append(category)
    return self._mapping[category]

  def add(self, category, count=1):
    """Adds count of the category to the frequency table.

    Args:
      category: string or integer, category to add frequency to.
      count: optional integer, how many to add.
    """
    category_id = self.get(category)
    if category_id <= 0:
      # Unknown token (or frozen-out category): don't track its frequency.
      return
    self._freq[category] += count

  def trim(self, min_frequency, max_frequency=-1):
    """Trims vocabulary for minimum frequency.

    Remaps ids from 1..n in sort frequency order,
    where n - number of elements left.

    Args:
      min_frequency: minimum frequency to keep (categories with frequency
        <= min_frequency are dropped).
      max_frequency: optional, maximum frequency to keep.
        Useful to remove very frequent categories (like stop words).
        NOTE: categories with frequency == max_frequency are also dropped.
    """
    # Sort by alphabet then reversed frequency.
    self._freq = sorted(
        sorted(
            self._freq.items(),
            key=lambda x: (isinstance(x[0], str), x[0])),
        key=lambda x: x[1],
        reverse=True)
    self._mapping = {self._unknown_token: 0}
    if self._support_reverse:
      self._reverse_mapping = [self._unknown_token]
    idx = 1
    for category, count in self._freq:
      if max_frequency > 0 and count >= max_frequency:
        continue
      if count <= min_frequency:
        # Frequencies are sorted descending, so everything after is too rare.
        break
      self._mapping[category] = idx
      idx += 1
      if self._support_reverse:
        self._reverse_mapping.append(category)
    # Rebuild as a defaultdict so add() keeps working for categories first
    # seen after trim() (a plain dict would raise KeyError on `+=`).
    self._freq = collections.defaultdict(int, dict(self._freq[:idx - 1]))

  def reverse(self, class_id):
    """Given class id reverse to original class name.

    Args:
      class_id: Id of the class.

    Returns:
      Class name.

    Raises:
      ValueError: if this vocabulary wasn't initialized with support_reverse.
    """
    if not self._support_reverse:
      raise ValueError("This vocabulary wasn't initialized with "
                       "support_reverse to support reverse() function.")
    return self._reverse_mapping[class_id]
| apache-2.0 |
Kjili/analysis-preservation.cern.ch | cap/modules/experiments/permissions/atlas.py | 2 | 1583 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP ATLAS permissions"""
from cap.modules.experiments.permissions.common import get_collaboration_group_needs, get_superuser_needs
from invenio_access import DynamicPermission
# Needs granting access to ATLAS content: every ATLAS collaboration group
# plus all superusers. (set(iterable) replaces the redundant
# `set([g for g in iterable])` pattern -- identical result.)
atlas_group_need = set(get_collaboration_group_needs('ATLAS'))
atlas_group_need |= set(get_superuser_needs())

# Shared permission object used to guard ATLAS analyses.
atlas_permission = DynamicPermission(*atlas_group_need)
def atlas_permission_factory(*args):
    """Return a fresh DynamicPermission built from the ATLAS group needs.

    ``*args`` is accepted only for factory-signature compatibility and
    is intentionally ignored.
    """
    return DynamicPermission(*atlas_group_need)
| gpl-2.0 |
nanolearningllc/edx-platform-cypress-2 | common/djangoapps/student/views.py | 12 | 91744 | """
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.translation import ungettext
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import commit_on_success_with_read_committed
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
# Module-level loggers: general student-app events vs. the security audit trail.
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")

# Per-course re-verification info surfaced on the student dashboard.
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display')  # pylint: disable=invalid-name

# Tracking event name emitted when a user starts changing an account setting.
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
    """Render a hidden CSRF input suitable for inclusion in a form.

    Returns an empty string when Django signals that no token is
    available (the 'NOTPROVIDED' sentinel).
    """
    value = context.get('csrf_token', '')
    if value == 'NOTPROVIDED':
        return ''
    markup = (u'<div style="display:none"><input type="hidden"'
              u' name="csrfmiddlewaretoken" value="%s" /></div>')
    return markup % (value,)
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
    """
    Render the edX main page.

    extra_context allows callers (e.g. external_auth) to trigger immediate
    display of certain modal windows such as signup.

    NOTE: branding/views.py:index() calls this and caches the result for
    anonymous users, so output must not vary on query params for them.
    """
    if extra_context is None:
        extra_context = {}

    # The course selection work is done in courseware.courses.
    domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN')  # normally False
    # Explicit identity check against False: domain=None is a valid value.
    if domain is False:
        domain = request.META.get('HTTP_HOST')

    courses = get_courses(user, domain=domain)
    sort_by_date = microsite.get_value(
        "ENABLE_COURSE_SORTING_BY_START_DATE",
        settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    )
    if sort_by_date:
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)

    context = {'courses': courses}
    # extra_context is applied last so callers may override defaults.
    context.update(extra_context)
    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """Substitute the per-user unique id into a survey link.

    Any ``{UNIQUE_ID}`` placeholder in *survey_link* is replaced with a
    unique id for the user (currently sha1(user.username)); links without
    the placeholder are returned unchanged.
    """
    unique_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=unique_id)
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the
    given student and course.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict: A dictionary with keys:
            'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
            'show_download_url': bool
            'download_url': url, only present if show_download_url is True
            'show_disabled_download_button': bool -- true if state is 'generating'
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
        Empty dict when the course cannot certify at all.
    """
    if not course_overview.may_certify():
        return {}
    cert_status = certificate_status_for_student(user, course_overview.id)
    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Return reverification-related information for *all* of the user's
    enrollments whose reverification status is in ``statuses``.

    Args:
        statuses (list): reverification statuses we want information for,
            e.g. ["must_reverify", "denied"].

    Returns:
        defaultdict(list): one key per requested status, e.g.
            dict["must_reverify"] = []
            dict["must_reverify"] = [some information]
    """
    info = defaultdict(list)
    # Sort each status bucket by its reverification end date.
    # NOTE(review): `info` is freshly created above, so each bucket is empty
    # here and the sort branch never fires -- preserved as-is from upstream.
    for status in statuses:
        entries = info[status]
        if entries:
            entries.sort(key=lambda item: item.date)
    return info
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Given a user, yield a filtered set of his or her course enrollments.

    Arguments:
        user (User): the user in question.
        org_to_include (str): for use in Microsites. If not None, only
            courses of this org are yielded.
        orgs_to_exclude (list[str]): courses of these orgs are skipped.

    Returns:
        generator[CourseEnrollment]: enrollments to display on the
        user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        # If the course is missing or broken, log an error and skip it.
        overview = enrollment.course_overview
        if not overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue
        org = overview.location.org
        # Microsite mode: drop anything not attributed (by ORG) to it.
        if org_to_include and org != org_to_include:
            continue
        # Drop enrollments whose course is attributed to an excluded org.
        if org in orgs_to_exclude:
            continue
        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode):  # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.

    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict): result of certificate_status_for_student(),
            or None when no status record exists.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)

    Returns:
        dict of template state (see cert_info()), the 'processing' defaults
        when no status exists, or None when the course hides certificate
        info early ('early_no_info').
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.regenerating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
    }

    default_status = 'processing'

    default_info = {'status': default_status,
                    'show_disabled_download_button': False,
                    'show_download_url': False,
                    'show_survey_button': False,
                    }

    if cert_status is None:
        return default_info

    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing')

    # 'early_no_info' courses suppress the whole certificate section until
    # a certificate is actually visible.
    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return None

    status = template_state.get(cert_status['status'], default_status)

    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None
    }

    # Offer the end-of-course survey once the cert has reached a final-ish state.
    if (status in ('generating', 'ready', 'notpassing', 'restricted') and
            course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False

    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                certificate_url = get_certificate_url(
                    user_id=user.id,
                    course_id=unicode(course_overview.id),
                )
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': u'{url}'.format(url=certificate_url)
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # Downloadable status without a URL is an inconsistent record;
            # log it and fall back to the 'processing' defaults.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']

        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()
        if linkedin_config.enabled:
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )

    if status in ('generating', 'ready', 'notpassing', 'restricted'):
        if 'grade' not in cert_status:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info
        else:
            status_dict['grade'] = cert_status['grade']

    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the standalone login page, deferring to the external-auth flow
    when one is configured, and redirecting straight to ``next`` when the
    user is already authenticated.
    """
    # External-auth installations (e.g. Shibboleth) take over the login flow.
    external_auth_response = external_auth_login(request)
    if external_auth_response is not None:
        return external_auth_response

    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    # Surface the most recent third-party-auth error (messages tagged
    # "social-auth") left by the auth pipeline, if any.
    third_party_auth_error = None
    for msg in messages.get_messages(request):
        if msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        # Bool injected into JS to submit form if we're inside a running third-
        # party auth pipeline; distinct from the actual instance of the running
        # pipeline, if any.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': microsite.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'third_party_auth_error': third_party_auth_error
    }

    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """
    Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.

    Renders the standalone registration page, deferring to the external-auth
    flow when one is configured and pre-populating the form from a running
    third-party-auth pipeline when available.
    """
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)

    # External-auth installations (e.g. Shibboleth) take over registration.
    external_auth_response = external_auth_register(request)
    if external_auth_response is not None:
        return external_auth_response

    context = {
        'login_redirect_url': redirect_to,  # This gets added to the query string of the "Sign In" button in the header
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': microsite.get_value(
            'platform_name',
            settings.PLATFORM_NAME
        ),
        'selected_provider': '',
        'username': '',
    }

    # Callers may supply extra template context (e.g. the shib extauth domain).
    if extra_context is not None:
        context.update(extra_context)

    if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)

    # If third-party auth is enabled, prepopulate the form with data from the
    # selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        current_provider = provider.Registry.get_from_pipeline(running_pipeline)
        if current_provider is not None:
            overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
            overrides['running_pipeline'] = running_pipeline
            overrides['selected_provider'] = current_provider.name
            context.update(overrides)

    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Compute upsell information from the course modes and the user's
    current enrollment.

    Returns a dict with:
        - 'show_upsell': whether to show the course upsell information
        - 'days_for_upsell': number of days left to upsell (None when there
          is no expiration date or no upsell).
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)

    mode_info = {'show_upsell': False, 'days_for_upsell': None}

    # Upsell only makes sense when a verified track exists and the user
    # isn't already on it.
    verified_mode = modes.get('verified')
    if verified_mode is not None and enrollment.mode != 'verified':
        mode_info['show_upsell'] = True
        expiration = verified_mode.expiration_datetime
        if expiration:
            # if there is an expiration date, find out how long from now it is
            today = datetime.datetime.now(UTC).date()
            mode_info['days_for_upsell'] = (expiration.date() - today).days

    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """Return whether the user's access to the course is blocked.

    Access is blocked when any of the user's redeemed registration codes was
    generated from an invoice that is no longer valid (unpaid bulk purchase).
    As a side effect, a blocked course also has the user's course email
    notifications opted out.
    """
    blocked = False
    for redeemed_registration in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        if redeemed_registration.invoice_item:
            if not getattr(redeemed_registration.invoice_item.invoice, 'is_valid'):
                blocked = True
                # disabling email notifications for unpaid registration courses
                Optout.objects.get_or_create(user=request.user, course_id=course_key)
                log.info(
                    u"User %s (%s) opted out of receiving emails from course %s",
                    request.user.username,
                    request.user.email,
                    course_key
                )
                # NOTE(review): event name "change-email1-settings" looks like a
                # typo for "change-email-settings" -- kept as-is since analytics
                # consumers may depend on it; confirm before renaming.
                track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
                break
    return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Render the student dashboard page.

    Gathers the logged-in user's course enrollments (filtered for the
    current microsite) plus all the per-course state the dashboard template
    needs: course modes, certificate statuses, verification statuses, email
    settings, refund/blocked/paid flags, credit statuses, order history,
    and any enrollment/activation notification messages.
    """
    user = request.user
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
    # for microsites, we want to filter and only show enrollments for courses within
    # the microsites 'ORG'
    course_org_filter = microsite.get_value('course_org_filter')
    # Let's filter out any courses in an "org" that has been declared to be
    # in a Microsite
    org_filter_out_set = microsite.get_all_orgs()
    # remove our current Microsite from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)
    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)
    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    # Re-shape into {course_id: {mode_slug: mode}} for O(1) lookups below.
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }
    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )
    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
    message = ""
    if not user.is_active:
        message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email, 'platform_name': platform_name}
        )
    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()
    # Courseware links appear only for courses the user can load AND whose
    # prerequisite requirements (if any) are met.
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )
    # Construct a dictionary of course mode information
    # used to render the course list. We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }
    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }
    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
            modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
            CourseAuthorization.instructor_email_enabled(enrollment.course_id)
        )
    )
    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)
    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )
    # Courses blocked because a redeemed registration code's invoice was
    # invalidated (see is_course_blocked, which also opts the user out of email).
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )
    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )
    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])
    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    else:
        redirect_message = ''
    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse(logout_user),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
    }
    return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes):  # pylint: disable=invalid-name
    """
    Build the "recently enrolled" notification message, if any.

    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.

    Returns:
        The rendered HTML message string, or None when the student has no
        recently enrolled courses.
    """
    recent_enrollments = _get_recently_enrolled_courses(course_enrollments)
    if not recent_enrollments:
        return None
    messages = []
    for enrollment in recent_enrollments:
        overview = enrollment.course_overview
        messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, enrollment)
        })
    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {
            'course_enrollment_messages': messages,
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME)
        }
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Filter a list of enrollments down to the recent, active ones.

    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.

    Returns:
        list[CourseEnrollment]: The enrollments created within the configured
        "recent enrollment" window.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        # An enrollment with no created date is explicitly excluded from
        # the list of recent enrollments.
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """Determine whether the dashboard will request donations for the given course.

    Donations are requested only when the platform-wide donation feature is
    enabled AND the user's current mode for this course has no minimum price.

    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment (CourseEnrollment): The enrollment object in which the user is enrolled

    Returns:
        True if the course is allowing donations.
    """
    if not DonationConfiguration.current().enabled:
        return False
    modes_for_course = course_modes[course_id]
    if enrollment.mode not in modes_for_course:
        return False
    return modes_for_course[enrollment.mode].min_price == 0
def _update_email_opt_in(request, org):
    """Helper function used to hit the profile API if email opt-in is enabled."""
    opt_in_value = request.POST.get('email_opt_in')
    # Only act when the form actually posted the field.
    if opt_in_value is None:
        return
    preferences_api.update_email_opt_in(request.user, org, opt_in_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.
    A credit course is a course for which a user can purchased
    college credit. The current flow is:
    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
    The dashboard is responsible for communicating the user's state in this flow.
    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.
    Returns: dict
        The returned dictionary has keys that are `CourseKey`s and values that
        are dictionaries with:
            * eligible (bool): True if the user is eligible for credit in this course.
            * deadline (datetime): The deadline for purchasing and requesting credit for this course.
            * purchased (bool): Whether the user has purchased credit for this course.
            * provider_name (string): The display name of the credit provider.
            * provider_status_url (string): A URL the user can visit to check on their credit request status.
            * request_status (string): Either "pending", "approved", or "rejected"
            * error (bool): If true, an unexpected error occurred when retrieving the credit status,
                so the user should contact the support team.
    Example:
        >>> _credit_statuses(user, course_enrollments)
        {
            CourseKey.from_string("edX/DemoX/Demo_Course"): {
                "course_key": "edX/DemoX/Demo_Course",
                "eligible": True,
                "deadline": 2015-11-23 00:00:00 UTC,
                "purchased": True,
                "provider_name": "Hogwarts",
                "provider_status_url": "http://example.com/status",
                "request_status": "pending",
                "error": False
            }
        }
    """
    from openedx.core.djangoapps.credit import api as credit_api
    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}
    # Batch-load all of the user's credit requests up front so the loop
    # below never hits the API per course.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }
    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider. We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }
    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }
    # Build one status dict per course in which the user is credit-eligible.
    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": None,
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }
        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id
        statuses[course_key] = status
    return statuses
@require_POST
@commit_on_success_with_read_committed
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.
    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.
    Args:
        request (`Request`): The Django request object
    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is set to False to avoid breaking legacy code or
            code with non-standard flows (ex. beta tester invitations), but
            for any standard enrollment flow you probably want this to be True.
    Returns:
        Response
    """
    # Get the user
    user = request.user
    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()
    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))
    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))
    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))
        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)
        available_modes = CourseMode.modes_for_course_dict(course_id)
        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            # 200 with the redirect URL in the body; the AJAX caller performs
            # the actual redirect.
            return HttpResponse(redirect_url)
        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (honor)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug. If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "honor".
            try:
                CourseEnrollment.enroll(user, course_id, check_access=check_access)
            except Exception:
                return HttpResponseBadRequest(_("Could not enroll"))
        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )
        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        if not CourseEnrollment.is_enrolled(user, course_id):
            return HttpResponseBadRequest(_("You are not enrolled in this course"))
        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """AJAX request to log in the user.

    Two paths through this view:
    - third-party auth: the user arrived with a running social-auth pipeline
      and did not supply an email/password, so we look up the linked account;
    - first-party auth: email/password from the POST body, with lockout,
      password-expiry, and rate-limit checks before authenticate().
    Successful logins return JSON with a redirect URL and set the
    logged-in cookies; failures return JSON with success=False.
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Supplying either credential means the user explicitly chose first-party auth.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            AUDIT_LOG.warning(
                u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
                    username=username, backend_name=backend_name))
            return HttpResponse(
                _("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
                    platform_name=platform_name, provider_name=requested_provider.name
                )
                + "<br/><br/>" +
                _("Use your {platform_name} username and password to log into {platform_name} below, "
                  "and then link your {platform_name} account with {provider_name} from your dashboard.").format(
                      platform_name=platform_name, provider_name=requested_provider.name
                )
                + "<br/><br/>" +
                _("If you don't have an {platform_name} account yet, "
                  "click <strong>Register</strong> at the top of the page.").format(
                      platform_name=platform_name),
                content_type="text/plain",
                status=403
            )
    else:
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                "value": _('There was an error receiving your login information. Please email us.'),  # TODO: User error message
            })  # TODO: this should be status code 400  # pylint: disable=fixme
        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
        # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
        # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
        # address into the Gmail login.
        if settings.FEATURES.get('AUTH_USE_SHIB') and user:
            try:
                eamap = ExternalAuthMap.objects.get(user=user)
                if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                    return JsonResponse({
                        "success": False,
                        "redirect": reverse('shib-login'),
                    })  # TODO: this should be status code 301  # pylint: disable=fixme
            except ExternalAuthMap.DoesNotExist:
                # This is actually the common case, logging in user without external linked login
                AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
        # see if account has been locked out due to excessive login failures
        user_found_by_email_lookup = user
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
                return JsonResponse({
                    "success": False,
                    "value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
                })  # TODO: this should be status code 429  # pylint: disable=fixme
        # see if the user must reset his/her password due to any policy settings
        if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
            return JsonResponse({
                "success": False,
                "value": _('Your password has expired due to password policy on this account. You must '
                           'reset your password before you can log in again. Please click the '
                           '"Forgot Password" link on this page to reset your password before logging in again.'),
            })  # TODO: this should be status code 403  # pylint: disable=fixme
        # if the user doesn't exist, we want to set the username to an invalid
        # username so that authentication is guaranteed to fail and we can take
        # advantage of the ratelimited backend
        username = user.username if user else ""
        if not third_party_auth_successful:
            try:
                user = authenticate(username=username, password=password, request=request)
            # this occurs when there are too many attempts from the same IP address
            except RateLimitException:
                return JsonResponse({
                    "success": False,
                    "value": _('Too many failed login attempts. Try again later.'),
                })  # TODO: this should be status code 429  # pylint: disable=fixme
        if user is None:
            # tick the failed login counters if the user exists in the database
            if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
                LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
            # if we didn't find this username earlier, the account for this email
            # doesn't exist, and doesn't have a corresponding password
            if username != "":
                if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                    loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                    AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
                else:
                    AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
            return JsonResponse({
                "success": False,
                "value": _('Email or password is incorrect.'),
            })  # TODO: this should be status code 400  # pylint: disable=fixme
        # successful login, clear failed login attempts counters, if applicable
        if LoginFailures.is_feature_enabled():
            LoginFailures.clear_lockout_counter(user)
        # Track the user's sign in
        if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
            tracking_context = tracker.get_tracker().resolve_context()
            analytics.identify(user.id, {
                'email': email,
                'username': username,
            })
            analytics.track(
                user.id,
                "edx.bi.user.account.authenticated",
                {
                    'category': "conversion",
                    'label': request.POST.get('course_id'),
                    'provider': None
                },
                context={
                    'Google Analytics': {
                        'clientId': tracking_context.get('client_id')
                    }
                }
            )
    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # 604800 seconds = one week of "remember me".
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise
        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)
        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })
        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
    reactivation_email_for_user(user)
    not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400  # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.

    Deprecated: use AccessTokenExchangeView instead.

    Responses:
        204: the token mapped to an existing, authenticated user.
        400: no "access_token" in the POST body.
        401: the token was invalid or did not map to a known user.
        404: the named backend is not an OAuth1/OAuth2 backend.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend
    # isinstance accepts a tuple of classes; this replaces the original
    # `isinstance(..., A) or isinstance(..., B)` chain. Guard clauses flatten
    # the original nested if/else without changing any outcome.
    if not isinstance(backend, (social_oauth.BaseOAuth1, social_oauth.BaseOAuth2)):
        raise Http404
    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)
    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    user = None
    try:
        user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        pass
    # do_auth can return a non-User object if it fails
    if user and isinstance(user, User):
        login(request, user)
        return JsonResponse(status=204)
    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@ensure_csrf_cookie
def logout_user(request):
    """
    HTTP request to log out the user. Redirects to marketing page.
    Deletes both the CSRF and sessionid cookies so the marketing
    site can determine the logged in state of the user
    """
    # We do not log here, because we have a handler registered
    # to perform logging on successful logouts.
    logout(request)
    # CAS deployments must round-trip through the CAS logout endpoint;
    # everyone else goes straight to the marketing root.
    target = reverse('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
    response = redirect(target)
    delete_logged_in_cookies(response)
    return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Render the view used to manage user standing: a table of user accounts
    that have been disabled and the staff member who disabled each one.
    Staff-only; raises Http404 for everyone else.
    """
    if not request.user.is_staff:
        raise Http404
    disabled_standings = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    )
    rows = [
        [standing.user.username, standing.user.standing.all()[0].changed_by]
        for standing in disabled_standings
    ]
    context = {'headers': ['username', 'account_changed_by'], 'rows': rows}
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html
    """
    if not request.user.is_staff:
        raise Http404
    context = {}

    # Validate the posted form fields with early returns.
    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)
    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)
    username = username.strip()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)

    # Named `_success` (not `_`) so we don't shadow ugettext.
    user_account, _success = UserStanding.objects.get_or_create(
        user=user, defaults={'changed_by': request.user},
    )
    if account_action == 'disable':
        user_account.account_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        user_account.account_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)
    user_account.changed_by = request.user
    user_account.standing_last_changed_at = datetime.datetime.now(UTC)
    user_account.save()
    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    if 'location' in request.POST:
        profile.location = request.POST['location']
    # Save unconditionally, matching the historical behavior of this endpoint.
    profile.save()
    return JsonResponse({
        "success": True,
        "location": profile.location,
    })
class AccountValidationError(Exception):
    """Validation failure tied to a specific account field (e.g. username or email)."""

    def __init__(self, message, field):
        # Keep standard Exception message behavior and remember which field failed.
        super(AccountValidationError, self).__init__(message)
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    Post-save handler that records the user's signup source (microsite)
    when a new User row is created.
    """
    if 'created' in kwargs and kwargs['created']:
        site = microsite.get_value('SITE_NAME')
        # Only white-labeled (microsite) deployments record a signup source.
        if site:
            UserSignupSource(user=kwargs['instance'], site=site).save()
            log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Returns a tuple (User, UserProfile, Registration).

    Raises ValidationError if the form is invalid, and AccountValidationError
    if the username or email is already taken.

    Note: this function is also used for creating test users.
    """
    if not form.is_valid():
        raise ValidationError(form.errors)

    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()

    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        user.save()
    except IntegrityError:
        # Figure out the cause of the integrity error.
        # Use .exists() instead of len(queryset) so we issue a cheap EXISTS
        # query rather than fetching every matching row.
        if User.objects.filter(username=user.username).exists():
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif User.objects.filter(email=user.email).exists():
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            # Unknown integrity failure; let it propagate.
            raise

    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)

    registration.register(user)

    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        # Extra (microsite-defined) profile fields are stored as JSON blob.
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise

    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Returns the newly created (and logged-in) User.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disprate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())

    # allow for microsites to define their own set of required/optional/hidden fields
    extra_fields = microsite.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )

    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params

    # 3rd-party-auth users never type a password; generate a random one.
    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()

    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            # External email was invalid; fall back to the form-supplied one.
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])

    extended_profile_fields = microsite.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    # NOTE: `eamap` is only bound when do_external_auth is True; the
    # `not do_external_auth` clause short-circuits before it is referenced.
    tos_required = (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(
            external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
        )
    )

    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )

    # Perform operations within a transaction that are critical to account creation
    with transaction.commit_on_success():
        # first, create the account
        (user, profile, registration) = _do_create_account(form)

        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                # Raising inside the transaction rolls back the new account too.
                raise ValidationError({'access_token': [error_message]})

    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())

    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:
            # Best-effort: a notifications failure must not abort signup.
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))

    dog_stats_api.increment("common.student.account_created")

    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)

    # Track the user's registration
    if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(user.id, {
            'email': user.email,
            'username': user.username,
        })

        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )

    create_comments_service_user(user)

    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = microsite.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        try:
            if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
                # Redirect all activation email to a single address (test/staging setups).
                dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
                message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                           '-' * 80 + '\n\n' + message)
                mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
            else:
                user.email_user(subject, message, from_address)
        except Exception:  # pylint: disable=broad-except
            log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
    else:
        # No activation email required: activate the registration immediately.
        registration.activate()

    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)

    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))

    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)

        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))

    return new_user
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)

    try:
        user = create_account_with_params(request, post_override or request.POST)
    except AccountValidationError as exc:
        return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
    except ValidationError as exc:
        # Report only the first (field, errors) pair back to the caller.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {
                "success": False,
                "field": field,
                "value": error_list[0],
            },
            status=400
        )

    # The AJAX caller knows the default destination upon success; only
    # override it when a third-party-auth pipeline needs to be resumed.
    redirect_url = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        redirect_url = pipeline.get_complete_url(pipeline.get(request)['backend'])

    response = JsonResponse({
        'success': True,
        'redirect_url': redirect_url,
    })
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.

    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.

    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login

    If username, email, or password are not provided, use
    randomly generated credentials.

    Returns an HttpResponse (JSON if the client accepts application/json,
    plain text otherwise) describing the created/logged-in user.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]

    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    course_id = request.GET.get('course_id', None)

    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')

    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    login_when_done = 'no_login' not in request.GET

    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )

    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except AccountValidationError:
        # Attempt to retrieve the existing user.
        # NOTE: the existing account's email/password are overwritten with the
        # requested values so the caller can always log in afterwards.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)

    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()

    # Activate the user
    reg.activate()
    reg.save()

    # ensure parental consent threshold is met
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()

    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)

    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)

    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)
        create_comments_service_user(user)

    # Provide the user with a valid CSRF token
    # then return a 200 response
    if request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked.

    Looks up the Registration by its activation key, activates the user
    if not already active, and auto-enrolls any pending course enrollments
    that were flagged for auto-enrollment.
    """
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not regs[0].user.is_active:
            regs[0].activate()
            already_active = False

        # Enroll student in any pending courses he/she may have if auto_enroll flag is set
        student = User.objects.filter(id=regs[0].user_id)
        if student:
            ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
            for cea in ceas:
                if cea.auto_enroll:
                    enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
                    manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
                    if manual_enrollment_audit is not None:
                        # get the enrolled by user and reason from the ManualEnrollmentAudit table.
                        # then create a new ManualEnrollmentAudit table entry for the same email
                        # different transition state.
                        ManualEnrollmentAudit.create_manual_enrollment_audit(
                            manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
                            manual_enrollment_audit.reason, enrollment
                        )

        resp = render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
        return resp
    if len(regs) == 0:
        # Unknown activation key.
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    # More than one registration matched the key -- should never happen.
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail. """
    # Add some rate limiting here by re-using the RateLimitMixin as a helper class
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()

    form = PasswordResetFormNoActive(request.POST)
    if form.is_valid():
        form.save(use_https=request.is_secure(),
                  from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
                  request=request,
                  domain_override=request.get_host())
        # When password change is complete, a "edx.user.settings.changed" event will be emitted.
        # But because changing the password is multi-step, we also emit an event here so that we can
        # track where the request was initiated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
    else:
        # bad user? tick the rate limiter counter
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)

    # NOTE: success is reported even when the form was invalid, so this
    # endpoint cannot be used to probe which e-mail addresses exist.
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def password_reset_confirm_wrapper(
    request,
    uidb36=None,
    token=None,
):
    """ A wrapper around django.contrib.auth.views.password_reset_confirm.
    Needed because we want to set the user as active at this step.

    Also enforces the optional password-policy / password-history rules
    before delegating to the stock Django view.
    """
    # cribbed from django.contrib.auth.views.password_reset_confirm
    # NOTE(review): if this lookup fails, `user` and `uid_int` stay unbound and
    # the policy checks below would raise NameError on POST -- the stock view
    # normally rejects such requests first, but this is fragile.
    try:
        uid_int = base36_to_int(uidb36)
        user = User.objects.get(id=uid_int)
        user.is_active = True
        user.save()
    except (ValueError, User.DoesNotExist):
        pass

    # tie in password strength enforcement as an optional level of
    # security protection
    err_msg = None

    if request.method == 'POST':
        password = request.POST['new_password1']
        if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
            try:
                validate_password_length(password)
                validate_password_complexity(password)
                validate_password_dictionary(password)
            # Use the forward-compatible "as" form instead of the legacy
            # Python-2-only "except E, err:" syntax.
            except ValidationError as err:
                err_msg = _('Password: ') + '; '.join(err.messages)

        # also, check the password reuse policy
        if not PasswordHistory.is_allowable_password_reuse(user, password):
            if user.is_staff:
                num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
            else:
                num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
            err_msg = ungettext(
                "You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
                "You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
                num_distinct
            ).format(num=num_distinct)

        # also, check to see if passwords are getting reset too frequent
        if PasswordHistory.is_password_reset_too_soon(user):
            num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
            err_msg = ungettext(
                "You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
                "You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
                num_days
            ).format(num=num_days)

    if err_msg:
        # We have an password reset attempt which violates some security policy, use the
        # existing Django template to communicate this back to the user
        context = {
            'validlink': True,
            'form': None,
            'title': _('Password reset unsuccessful'),
            'err_msg': err_msg,
            'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        }
        return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
    else:
        # we also want to pass settings.PLATFORM_NAME in as extra_context
        extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}

        if request.method == 'POST':
            # remember what the old password hash is before we call down
            old_password_hash = user.password

            result = password_reset_confirm(
                request, uidb36=uidb36, token=token, extra_context=extra_context
            )

            # get the updated user
            updated_user = User.objects.get(id=uid_int)

            # did the password hash change, if so record it in the PasswordHistory
            if updated_user.password != old_password_hash:
                entry = PasswordHistory()
                entry.create(updated_user)

            return result
        else:
            return password_reset_confirm(
                request, uidb36=uidb36, token=token, extra_context=extra_context
            )
def reactivation_email_for_user(user):
    """Re-send the activation email to ``user``; return a JsonResponse outcome."""
    try:
        reg = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme

    context = {
        'name': user.profile.name,
        'key': reg.activation_key,
    }
    # Email subjects must not contain newlines.
    subject = ''.join(render_to_string('emails/activation_email_subject.txt', context).splitlines())
    message = render_to_string('emails/activation_email.txt', context)

    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme

    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address If any issues are encountered
    with verification a ValueError will be thrown.

    Checks that the address is syntactically valid, differs from the user's
    current address, and is not already in use by another account.
    """
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))

    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))

    # Use .exists() instead of .count() so the database runs a cheap EXISTS
    # query rather than counting every matching row.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be thrown.
    """
    # Reuse an existing pending change if one exists, so a user can only
    # ever have a single outstanding email-change request.
    pec_list = PendingEmailChange.objects.filter(user=user)
    if len(pec_list) == 0:
        pec = PendingEmailChange()
        pec.user = user
    else:
        pec = pec_list[0]

    # if activation_key is not passing as an argument, generate a random key
    if not activation_key:
        activation_key = uuid.uuid4().hex
    pec.new_email = new_email
    pec.activation_key = activation_key
    pec.save()

    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }
    subject = render_to_string('emails/email_change_subject.txt', context)
    # Email subjects must not contain newlines.
    subject = ''.join(subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)

    from_address = microsite.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))

    # When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
    # But because changing the email address is multi-step, we also emit an event here so that we can
    # track where the request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
@transaction.commit_manually
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update

    Transaction management is manual: every early-exit path must
    explicitly rollback, and only the fully successful path commits.
    """
    try:
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            response = render_to_response("invalid_email_key.html", {})
            transaction.rollback()
            return response

        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }

        # Someone else may have claimed the address since the request was made.
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.rollback()
            return response

        subject = render_to_string('emails/email_change_subject.txt', address_context)
        # Email subjects must not contain newlines.
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)

        # Archive the old address (with a timestamp) in the profile meta.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()

        # Send it to the old email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.rollback()
            return response

        user.email = pec.new_email
        user.save()
        pec.delete()

        # And send it to the new email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.rollback()
            return response

        response = render_to_response("email_change_successful.html", address_context)
        transaction.commit()
        return response
    except Exception:  # pylint: disable=broad-except
        # If we get an unexpected exception, be sure to rollback the transaction
        transaction.rollback()
        raise
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course.

    Opting in deletes any existing Optout record for the course; opting out
    creates one (idempotently). Both directions are logged and tracked.
    """
    user = request.user

    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    receive_emails = request.POST.get("receive_emails")
    if receive_emails:
        # Opting in: remove any existing opt-out record for this course.
        optout_object = Optout.objects.filter(user=user, course_id=course_key)
        if optout_object:
            optout_object.delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard')
    else:
        # Opting out: get_or_create keeps the operation idempotent.
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')

    return JsonResponse({"success": True})
| agpl-3.0 |
a10networks/a10sdk-python | a10sdk/core/ip/ip_dns_secondary.py | 2 | 1344 | from a10sdk.common.A10BaseClass import A10BaseClass
class Secondary(A10BaseClass):

    """Class Description::

    Secondary DNS server.

    Class secondary supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param ip_v4_addr: {"not": "ip-v6-addr", "optional": true, "type": "string", "description": "DNS server address", "format": "ipv4-address"}
    :param ip_v6_addr: {"not": "ip-v4-addr", "optional": true, "type": "string", "description": "DNS server address", "format": "ipv6-address"}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::

    `https://<Hostname|Ip address>//axapi/v3/ip/dns/secondary`.
    """

    def __init__(self, **kwargs):
        # Bookkeeping attributes required by A10BaseClass.
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "secondary"
        self.a10_url = "/axapi/v3/ip/dns/secondary"
        self.DeviceProxy = ""

        # Writable DNS-server attributes; blank until set by the caller.
        self.ip_v4_addr = ""
        self.ip_v6_addr = ""
        self.uuid = ""

        # Any attribute may be overridden via keyword arguments.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
myerssr/volatility | volatility/plugins/mac/arp.py | 58 | 1398 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.plugins.mac.route as route
class mac_arp(route.mac_route):
    """ Prints the arp table """

    def calculate(self):
        common.set_plugin_members(self)

        # Walk the kernel's linked list of ARP entries, anchored at the
        # _llinfo_arp symbol, yielding the route object attached to each.
        head_addr = self.addr_space.profile.get_symbol("_llinfo_arp")
        head_ptr = obj.Object("Pointer", offset = head_addr, vm = self.addr_space)

        entry = head_ptr.dereference_as("llinfo_arp")
        while entry:
            yield entry.la_rt
            entry = entry.la_le.le_next
| gpl-2.0 |
jaggu303619/asylum | setup.py | 15 | 5075 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
from setuptools import find_packages, setup
from os.path import join, dirname
execfile(join(dirname(__file__), 'openerp', 'release.py'))  # Load release variables (version, description, author, ...) into this namespace
lib_name = 'openerp'  # Package name used for the package_dir mapping in setup() below
def py2exe_datafiles():
    """Collect the data files to bundle into the Windows py2exe build.

    Returns a list of (directory, [file, ...]) pairs covering the openerp
    package itself plus the non-Python data shipped by babel, pytz and
    docutils, and the MSVC runtime.
    """
    data_files = {}
    data_files['Microsoft.VC90.CRT'] = glob('C:\Microsoft.VC90.CRT\*.*')

    # Ship every non-bytecode, non-backup file of the openerp package.
    for root, dirnames, filenames in os.walk('openerp'):
        for filename in filenames:
            if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename):
                data_files.setdefault(root, []).append(join(root, filename))

    import babel
    babel_dir = dirname(babel.__file__)
    data_files['babel/localedata'] = glob(join(babel_dir, 'localedata', '*'))
    data_files['babel'] = [join(babel_dir, name) for name in ('global.dat', 'numbers.py', 'support.py', 'plural.py')]
    data_files['babel/messages'] = [join(babel_dir, 'messages', name) for name in ('frontend.py', 'mofile.py')]

    import pytz
    tzdir = dirname(pytz.__file__)
    for root, _, filenames in os.walk(join(tzdir, 'zoneinfo')):
        base = join('pytz', root[len(tzdir) + 1:])
        data_files[base] = [join(root, f) for f in filenames]

    import docutils
    dudir = dirname(docutils.__file__)
    for root, _, filenames in os.walk(dudir):
        base = join('docutils', root[len(dudir) + 1:])
        data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]

    return data_files.items()
def py2exe_options():
    """Return the py2exe-specific keyword arguments for setup().

    On Windows ('nt') this configures the console entry point, the list
    of packages to freeze and the bundled data files; on every other OS
    it returns an empty dict so setup() is unaffected.
    """
    if os.name != 'nt':
        return {}

    import py2exe

    console = [
        {'script': 'openerp-server', 'icon_resources': [
            (1, join('setup', 'win32', 'static', 'pixmaps', 'openerp-icon.ico'))
        ]},
    ]
    py2exe_conf = {
        'skip_archive': 1,
        'optimize': 0,  # Keep the assert running as the integrated tests rely on them.
        'dist_dir': 'dist',
        'packages': [
            'asynchat', 'asyncore',
            'commands',
            'dateutil',
            'decimal',
            'docutils',
            'email',
            'encodings',
            'HTMLParser',
            'imaplib',
            'jinja2',
            'lxml', 'lxml._elementpath', 'lxml.builder', 'lxml.etree', 'lxml.objectify',
            'mako',
            'markupsafe',
            'mock',
            'openerp',
            'openid',
            'PIL',
            'poplib',
            'psutil',
            'pychart',
            'pydot',
            'pyparsing',
            'pytz',
            'reportlab',
            'select',
            'simplejson',
            'smtplib',
            'uuid',
            'vatnumber',
            'vobject',
            'win32service', 'win32serviceutil',
            'xlwt',
            'xml', 'xml.dom',
            'yaml',
        ],
        'excludes': ['Tkconstants', 'Tkinter', 'tcl'],
    }
    return {
        'console': console,
        'options': {'py2exe': py2exe_conf},
        'data_files': py2exe_datafiles(),
    }
# All release metadata used below (version, description, long_desc, url,
# author, author_email, classifiers, license) is injected into this
# module's namespace by the execfile() of openerp/release.py above.
setup(
    name='openerp',
    version=version,
    description=description,
    long_description=long_desc,
    url=url,
    author=author,
    author_email=author_email,
    # classifiers is a newline-separated string in release.py; drop blanks.
    classifiers=filter(None, classifiers.split('\n')),
    license=license,
    scripts=['openerp-server'],
    packages=find_packages(),
    package_dir={'%s' % lib_name: 'openerp'},
    include_package_data=True,
    # pychart is not on PyPI; pip needs this extra index to resolve it.
    dependency_links=['http://download.gna.org/pychart/'],
    install_requires=[
        'babel',
        'docutils',
        'feedparser',
        'gdata',
        'Jinja2',
        'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
        'mako',
        'mock',
        'PIL', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
        'psutil', # windows binary code.google.com/p/psutil/downloads/list
        'psycopg2 >= 2.2',
        'pychart', # not on pypi, use: pip install http://download.gna.org/pychart/PyChart-1.39.tar.gz
        'pydot',
        'python-dateutil < 2',
        'python-ldap', # optional
        'python-openid',
        'pytz',
        'pywebdav < 0.9.8',
        'pyyaml',
        'reportlab', # windows binary pypi.python.org/pypi/reportlab
        'simplejson',
        'unittest2',
        'vatnumber',
        'vobject',
        'werkzeug',
        'xlwt',
    ],
    extras_require={
        'SSL': ['pyopenssl'],
    },
    tests_require=[
        'unittest2',
    ],
    # On Windows this contributes the py2exe configuration; elsewhere empty.
    **py2exe_options()
)
| agpl-3.0 |
GaetanCambier/CouchPotatoServer | libs/axl/axel.py | 65 | 13262 | # axel.py
#
# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom
# Edits by Ruud Burger
#
# Based on an idea by Peter Thatcher, found on
# http://www.valuedlessons.com/2008/04/events-in-python.html
#
# This module is part of Axel and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Source: http://pypi.python.org/pypi/axel
# Docs: http://packages.python.org/axel
from Queue import Empty, Queue
import hashlib
import sys
import threading
from couchpotato.core.helpers.variable import natsortKey
class Event(object):
    """
    Event object inspired by C# events. Handlers can be registered and
    unregistered using += and -= operators. Execution and result are
    influenced by the arguments passed to the constructor and += method.
    from axel import Event
    event = Event()
    def on_event(*args, **kwargs):
        return (args, kwargs)
    event += on_event # handler registration
    print(event(10, 20, y=30))
    >> ((True, ((10, 20), {'y': 30}), <function on_event at 0x00BAA270>),)
    event -= on_event # handler is unregistered
    print(event(10, 20, y=30))
    >> None
    class Mouse(object):
        def __init__(self):
            self.click = Event(self)
            self.click += self.on_click # handler registration
        def on_click(self, sender, *args, **kwargs):
            assert isinstance(sender, Mouse), 'Wrong sender'
            return (args, kwargs)
    mouse = Mouse()
    print(mouse.click(10, 20))
    >> ((True, ((10, 20), {}),
    >> <bound method Mouse.on_click of <__main__.Mouse object at 0x00B6F470>>),)
    mouse.click -= mouse.on_click # handler is unregistered
    print(mouse.click(10, 20))
    >> None
    """

    def __init__(self, name = None, sender = None, asynch = False, exc_info = False,
                 lock = None, threads = 3, traceback = False):
        """ Creates an event
        asynch
            if True handler's are executes asynchronous
        exc_info
            if True, result will contain sys.exc_info()[:2] on error
        lock
            threading.RLock used to synchronize execution
        sender
            event's sender. The sender is passed as the first argument to the
            handler, only if is not None. For this case the handler must have
            a placeholder in the arguments to receive the sender
        threads
            maximum number of threads that will be started
        traceback
            if True, the execution result will contain sys.exc_info()
            on error. exc_info must be also True to get the traceback
        hash = self.hash(handler)
        Handlers are stored in a dictionary that has as keys
        '<priority>.<handler hash>' (see handle())
            handlers = {
                key : (handler, memoize, timeout),
                key : (handler, memoize, timeout), ...
            }
        The execution result is cached using the following structure
            memoize = {
                hash : ((args, kwargs, result), (args, kwargs, result), ...),
                hash : ((args, kwargs, result), ...), ...
            }
        The execution result is returned as a tuple having this structure
            exec_result = (
                (True, result, handler),        # on success
                (False, error_info, handler),   # on error
                (None, None, handler), ...      # asynchronous execution
            )
        """
        self.name = name
        self.asynchronous = asynch
        self.exc_info = exc_info
        self.lock = lock
        self.sender = sender
        self.threads = threads
        self.traceback = traceback
        self.handlers = {}
        self.memoize = {}

    def hash(self, handler):
        """ Stable key for a handler, derived from its repr() """
        # md5() requires bytes: encoding keeps this working on Python 3
        # and is a no-op for the ascii repr on Python 2.
        return hashlib.md5(str(handler).encode('utf-8')).hexdigest()

    def handle(self, handler, priority = 0):
        """ Registers a handler. The handler can be transmitted together
        with two arguments as a list or dictionary. The arguments are:
        memoize
            if True, the execution result will be cached in self.memoize
        timeout
            will allocate a predefined time interval for the execution
        If arguments are provided as a list, they are considered to have
        this sequence: (handler, memoize, timeout)
        Examples:
            event += handler
            event += (handler, True, 1.5)
            event += {'handler':handler, 'memoize':True, 'timeout':1.5}
        """
        handler_, memoize, timeout = self._extract(handler)
        # Keys are '<priority>.<hash>' so fire() can run handlers in
        # priority order via a natural sort of the keys.
        self.handlers['%s.%s' % (priority, self.hash(handler_))] = (handler_, memoize, timeout)
        return self

    def unhandle(self, handler):
        """ Unregisters a handler """
        handler_, memoize, timeout = self._extract(handler)
        # BUGFIX: handle() stores keys as '<priority>.<hash>', so looking up
        # the bare hash always failed with ValueError. Match on the hash
        # suffix instead, removing the handler at any priority.
        suffix = '.%s' % self.hash(handler_)
        keys = [key for key in self.handlers if key.endswith(suffix)]
        if not keys:
            raise ValueError('Handler "%s" was not found' % str(handler_))
        for key in keys:
            del self.handlers[key]
        return self

    def fire(self, *args, **kwargs):
        """ Stores all registered handlers in a queue for processing """
        self.queue = Queue()
        result = {}

        if self.handlers:
            # With an ordering lock only one worker may run, otherwise the
            # lock could not guarantee handler order.
            max_threads = 1 if kwargs.get('event_order_lock') else self._threads()

            # Set global result
            def add_to(key, value):
                result[key] = value
            kwargs['event_add_to_result'] = add_to

            for i in range(max_threads):
                t = threading.Thread(target = self._execute,
                                     args = args, kwargs = kwargs)
                t.daemon = True
                t.start()

            # sorted() instead of list.sort() also works on dict key views
            # (Python 3); the natural sort honors the priority prefix.
            handler_keys = sorted(self.handlers.keys(), key = natsortKey)

            for handler in handler_keys:
                self.queue.put(handler)

                if self.asynchronous:
                    handler_, memoize, timeout = self.handlers[handler]
                    result[handler] = (None, None, handler_)

            if not self.asynchronous:
                self.queue.join()

        return result

    def count(self):
        """ Returns the count of registered handlers """
        return len(self.handlers)

    def clear(self):
        """ Discards all registered handlers and cached results """
        self.handlers.clear()
        self.memoize.clear()

    def _execute(self, *args, **kwargs):
        """ Worker loop: executes all handlers stored in the queue """
        # Remove get and set from kwargs
        add_to_result = kwargs.get('event_add_to_result')
        del kwargs['event_add_to_result']

        # Get and remove order lock
        order_lock = kwargs.get('event_order_lock')
        try: del kwargs['event_order_lock']
        except: pass

        # Get and remove return on first
        return_on_result = kwargs.get('event_return_on_result')
        try: del kwargs['event_return_on_result']
        except: pass

        got_results = False

        while True:
            try:
                h_ = self.queue.get(timeout = 2)
                handler, memoize, timeout = self.handlers[h_]

                # Caller only wants the first result and one was already
                # produced: drain the queue without executing.
                if return_on_result and got_results:
                    if not self.asynchronous:
                        self.queue.task_done()
                    continue

                if order_lock:
                    order_lock.acquire()

                try:
                    r = self._memoize(memoize, timeout, handler, *args, **kwargs)
                    if not self.asynchronous:
                        if not return_on_result or (return_on_result and r[1] is not None):
                            add_to_result(h_, tuple(r))
                            got_results = True
                except Exception:
                    if not self.asynchronous:
                        add_to_result(h_, (False, self._error(sys.exc_info()),
                                           handler))
                    else:
                        # NOTE(review): error_handler is not defined on Event;
                        # an asynchronous failure would raise AttributeError
                        # here — confirm intended behavior.
                        self.error_handler(sys.exc_info())
                finally:
                    if order_lock:
                        order_lock.release()

                    if not self.asynchronous:
                        self.queue.task_done()

                    if self.queue.empty():
                        raise Empty
            except Empty:
                break

    def _extract(self, queue_item):
        """ Extracts a handler and handler's arguments that can be provided
        as list or dictionary. If arguments are provided as list, they are
        considered to have this sequence: (handler, memoize, timeout)
        Examples:
            event += handler
            event += (handler, True, 1.5)
            event += {'handler':handler, 'memoize':True, 'timeout':1.5}
        """
        assert queue_item, 'Invalid list of arguments'
        handler = None
        memoize = False
        timeout = 0

        if not isinstance(queue_item, (list, tuple, dict)):
            handler = queue_item
        elif isinstance(queue_item, (list, tuple)):
            if len(queue_item) == 3:
                handler, memoize, timeout = queue_item
            elif len(queue_item) == 2:
                handler, memoize, = queue_item
            elif len(queue_item) == 1:
                # BUGFIX: register the element, not the 1-element sequence
                # itself (which was uncallable).
                handler = queue_item[0]
        elif isinstance(queue_item, dict):
            handler = queue_item.get('handler')
            memoize = queue_item.get('memoize', False)
            timeout = queue_item.get('timeout', 0)

        return (handler, bool(memoize), float(timeout))

    def _memoize(self, memoize, timeout, handler, *args, **kwargs):
        """ Caches the execution result of successful executions
        hash = self.hash(handler)
        memoize = {
            hash : ((args, kwargs, result), (args, kwargs, result), ...),
            hash : ((args, kwargs, result), ...), ...
        }
        """
        if not isinstance(handler, Event) and self.sender is not None:
            args = list(args)[:]
            args.insert(0, self.sender)

        if not memoize:
            if timeout <= 0: #no time restriction
                result = [True, handler(*args, **kwargs), handler]
                return result

            result = self._timeout(timeout, handler, *args, **kwargs)
            if isinstance(result, tuple) and len(result) == 3:
                if isinstance(result[1], Exception): #error occurred
                    return [False, self._error(result), handler]
            return [True, result, handler]
        else:
            hash_ = self.hash(handler)
            if hash_ in self.memoize:
                for args_, kwargs_, result in self.memoize[hash_]:
                    if args_ == args and kwargs_ == kwargs:
                        return [True, result, handler]

            if timeout <= 0: #no time restriction
                result = handler(*args, **kwargs)
            else:
                result = self._timeout(timeout, handler, *args, **kwargs)
                if isinstance(result, tuple) and len(result) == 3:
                    if isinstance(result[1], Exception): #error occurred
                        return [False, self._error(result), handler]

            # NOTE(review): this lock is created per call, so it does not
            # actually synchronize concurrent workers — kept as-is to avoid
            # a behavior change; confirm whether a shared lock was intended.
            lock = threading.RLock()
            lock.acquire()
            try:
                if hash_ not in self.memoize:
                    self.memoize[hash_] = []
                self.memoize[hash_].append((args, kwargs, result))
                return [True, result, handler]
            finally:
                lock.release()

    def _timeout(self, timeout, handler, *args, **kwargs):
        """ Controls the time allocated for the execution of a method """
        t = spawn_thread(target = handler, args = args, kwargs = kwargs)
        t.daemon = True
        t.start()
        t.join(timeout)

        if not t.is_alive():
            if t.exc_info:
                return t.exc_info
            return t.result
        else:
            # The worker thread is still running; it cannot actually be
            # killed, so report the overrun as an error tuple.
            try:
                msg = '[%s] Execution was forcefully terminated'
                raise RuntimeError(msg % t.name)
            except:
                return sys.exc_info()

    def _threads(self):
        """ Calculates maximum number of threads that will be started """
        if self.threads < len(self.handlers):
            return self.threads
        return len(self.handlers)

    def _error(self, exc_info):
        """ Retrieves the error info """
        if self.exc_info:
            if self.traceback:
                return exc_info
            return exc_info[:2]
        return exc_info[1]

    __iadd__ = handle
    __isub__ = unhandle
    __call__ = fire
    __len__ = count
class spawn_thread(threading.Thread):
    """ Thread that runs *target* and captures its outcome: the return
    value lands in ``self.result`` (or ``default`` if it raised) and any
    exception's ``sys.exc_info()`` lands in ``self.exc_info``. """

    def __init__(self, target, args = (), kwargs = {}, default = None):
        threading.Thread.__init__(self)
        self._fn = target
        self._fn_args = args
        self._fn_kwargs = kwargs
        self.result = default
        self.exc_info = None

    def run(self):
        try:
            self.result = self._fn(*self._fn_args, **self._fn_kwargs)
        except:
            self.exc_info = sys.exc_info()
        finally:
            # Drop references so the callable/arguments can be collected
            # even while the thread object itself is still referenced.
            del self._fn, self._fn_args, self._fn_kwargs
| gpl-3.0 |
marratj/ansible | lib/ansible/template/vars.py | 32 | 4293 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Mapping
from jinja2.utils import missing
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
__all__ = ['AnsibleJ2Vars']
class AnsibleJ2Vars(Mapping):
    '''
    Helper class to template all variable content before jinja2 sees it. This is
    done by hijacking the variable storage that jinja2 uses, and overriding __contains__
    and __getitem__ to look like a dict. Added bonus is avoiding duplicating the large
    hashes that inject tends to be.
    To facilitate using builtin jinja2 things like range, globals are also handled here.
    '''
    def __init__(self, templar, globals, locals=None, *extras):
        '''
        Initializes this object with a valid Templar() object, as
        well as several dictionaries of variables representing
        different scopes (in jinja2 terminology).
        '''
        self._templar = templar
        self._globals = globals
        self._extras = extras
        self._locals = dict()
        if isinstance(locals, dict):
            for key, val in iteritems(locals):
                if val is not missing:
                    # jinja2 prefixes template-local variables with 'l_';
                    # strip the prefix and skip jinja2's own bookkeeping names.
                    if key[:2] == 'l_':
                        self._locals[key[2:]] = val
                    elif key not in ('context', 'environment', 'template'):
                        self._locals[key] = val
    def __contains__(self, k):
        # Lookup precedence mirrors __getitem__: templar variables first,
        # then locals, extras, and finally globals.
        if k in self._templar._available_variables:
            return True
        if k in self._locals:
            return True
        for i in self._extras:
            if k in i:
                return True
        if k in self._globals:
            return True
        return False
    def __iter__(self):
        # Union of all scopes; duplicates collapse so iteration matches len().
        keys = set()
        keys.update(self._templar._available_variables, self._locals, self._globals, *self._extras)
        return iter(keys)
    def __len__(self):
        keys = set()
        keys.update(self._templar._available_variables, self._locals, self._globals, *self._extras)
        return len(keys)
    def __getitem__(self, varname):
        if varname not in self._templar._available_variables:
            if varname in self._locals:
                return self._locals[varname]
            for i in self._extras:
                if varname in i:
                    return i[varname]
            if varname in self._globals:
                return self._globals[varname]
            else:
                raise KeyError("undefined variable: %s" % varname)
        variable = self._templar._available_variables[varname]
        # HostVars is special, return it as-is, as is the special variable
        # 'vars', which contains the vars structure
        from ansible.vars.hostvars import HostVars
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # (dict AND varname=="vars") OR HostVars OR __UNSAFE__ — presumably
        # intentional, but worth confirming.
        if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
            return variable
        else:
            value = None
            try:
                value = self._templar.template(variable)
            except Exception as e:
                # NOTE(review): e.message only exists on Python 2 exceptions;
                # this would AttributeError on Python 3 — confirm target runtime.
                raise type(e)(to_native(variable) + ': ' + e.message)
            return value
    def add_locals(self, locals):
        '''
        If locals are provided, create a copy of self containing those
        locals in addition to what is already in this variable proxy.
        '''
        if locals is None:
            return self
        return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras)
| gpl-3.0 |
samuelfekete/Pythonometer | tests/test_questions.py | 1 | 1786 | """Test all questions."""
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pythonometer.quiz import all_questions
from pythonometer.questions.base import WrongAnswer
class TestQuestions(unittest.TestCase):
    """Test the questions.
    All question tests are the same, so they are loaded dynamically.
    """
    # Test methods are attached to this class at import time by the loop
    # below — one 'test_<QuestionName>' method per discovered question.
    pass
# Add a test for every question.
for question in all_questions():
    # Bind the loop variable as a default argument so each generated test
    # keeps its own question class (avoids the late-binding closure pitfall).
    def question_test(self, question=question):
        current_question = question()
        # Assert that a question string is supplied.
        question_string = current_question.get_question_text()
        # NOTE: basestring is Python 2-only; this suite targets Python 2.
        self.assertIsInstance(question_string, basestring)
        # Assert that at least one correct answer is given.
        self.assert_(current_question.get_correct_answers())
        # Assert that checking with the correct answers returns True.
        for correct_answer in current_question.get_correct_answers():
            self.assert_(current_question.check_answer(correct_answer))
        # Assert that checking with the wrong answers raises WrongAnswer.
        for wrong_answer in current_question.get_wrong_answers():
            with self.assertRaises(WrongAnswer):
                current_question.check_answer(wrong_answer)
        # Assert that checking a wrong answer raises WrongAnswer.
        with self.assertRaises(WrongAnswer):
            current_question.check_answer('')
        # Assert that checking the answer with bad code raises WrongAnswer.
        with self.assertRaises(WrongAnswer):
            current_question.check_answer('raise Exception')
    setattr(TestQuestions, 'test_{}'.format(question.__name__), question_test)
if __name__ == '__main__':
    unittest.main()
| mit |
bowlofstew/changes | changes/api/job_log_details.py | 2 | 2316 | from __future__ import absolute_import, division, unicode_literals
from flask import Response, request
from changes.api.base import APIView
from changes.models import LogSource, LogChunk
LOG_BATCH_SIZE = 50000 # in length of chars
class JobLogDetailsAPIView(APIView):
    def get(self, job_id, source_id):
        """
        Return chunks for a LogSource.

        Query params:
            offset -- byte offset to read from; -1 (default) means "tail",
                      i.e. start from the end of the log.
            limit  -- maximum number of bytes to return; -1 (default) means
                      LOG_BATCH_SIZE (or unlimited when raw is set).
            raw    -- if truthy, return plain text instead of JSON.
        """
        source = LogSource.query.get(source_id)
        if source is None or source.job_id != job_id:
            return '', 404
        offset = int(request.args.get('offset', -1))
        limit = int(request.args.get('limit', -1))
        raw = request.args.get('raw')
        # limit == 0 disables the limit filters below (falsy sentinel).
        if raw and limit == -1:
            limit = 0
        elif limit == -1:
            limit = LOG_BATCH_SIZE
        queryset = LogChunk.query.filter(
            LogChunk.source_id == source.id,
        ).order_by(LogChunk.offset.desc())
        if offset == -1:
            # starting from the end so we need to know total size
            tail = queryset.limit(1).first()
            if tail is None:
                logchunks = []
            else:
                if limit:
                    # Keep only chunks overlapping the final `limit` bytes.
                    queryset = queryset.filter(
                        (LogChunk.offset + LogChunk.size) >= max(tail.offset + tail.size - limit, 0),
                    )
                logchunks = list(queryset)
        else:
            queryset = queryset.filter(
                LogChunk.offset > offset,
            )
            if limit:
                queryset = queryset.filter(
                    LogChunk.offset <= offset + limit,
                )
            logchunks = list(queryset)
        # Re-sort chronologically (the query ordered by offset descending).
        logchunks.sort(key=lambda x: x.date_created)
        if logchunks:
            next_offset = logchunks[-1].offset + logchunks[-1].size + 1
        else:
            next_offset = offset
        if raw:
            return Response(''.join(l.text for l in logchunks), mimetype='text/plain')
        context = self.serialize({
            'source': source,
            'chunks': logchunks,
            'nextOffset': next_offset,
        })
        context['source']['step'] = self.serialize(source.step)
        if source.step:
            # NOTE(review): the trailing comma makes 'phase' a 1-tuple —
            # looks unintentional; confirm what API clients expect.
            context['source']['step']['phase'] = self.serialize(source.step.phase),
        return self.respond(context, serialize=False)
| apache-2.0 |
isaac-philip/loolu | common/django/utils/http.py | 29 | 3849 | import re
import urllib
from email.Utils import formatdate
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_unicode(urllib.quote(smart_str(url), safe))
# Wrapped so lazy translation strings are only evaluated (to unicode) on use.
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_unicode(urllib.quote_plus(smart_str(url), safe))
# Wrapped so lazy translation strings are only evaluated (to unicode) on use.
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
    then encoded as per normal.
    """
    if hasattr(query, 'items'):
        query = query.items()
    # 'cond and a or b' is the historical pre-ternary idiom; safe here since
    # the 'and' branch (a non-empty list) is always truthy.
    return urllib.urlencode(
        [(smart_str(k),
         isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
            for k, v in query],
        doseq)
def cookie_date(epoch_seconds=None):
    """
    Format a timestamp for a cookie header per Netscape's cookie standard:
    'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.

    `epoch_seconds` is a float of seconds since the epoch, in UTC (as
    produced by time.time()); None means the current time.
    """
    date_str = formatdate(epoch_seconds)
    # formatdate() yields 'Wdy, DD Mon YYYY HH:MM:SS -0000'; re-join the
    # day/month/year with dashes and replace the zone with explicit GMT.
    return date_str[:7] + '-' + date_str[8:11] + '-' + date_str[12:25] + ' GMT'
def http_date(epoch_seconds=None):
    """
    Format a timestamp as an RFC 1123 date, as required by HTTP
    (RFC 2616 section 3.3.1): 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    `epoch_seconds` is a float of seconds since the epoch, in UTC (as
    produced by time.time()); None means the current time.
    """
    date_str = formatdate(epoch_seconds)
    # Swap the trailing numeric '-0000' zone emitted by formatdate() for 'GMT'.
    return date_str[:25] + ' GMT'
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an integer
    """
    return int(s, 36)
def int_to_base36(i):
    """
    Converts a non-negative integer to a base 36 string.

    Raises ValueError for negative input, which previously produced a
    wrong single-character result via negative indexing.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if i < 36:
        return digits[i]
    b36 = ''
    # divmod uses floor division, fixing the original 'i / j' which is
    # float division on Python 3 and broke the digit lookup.
    while i != 0:
        i, n = divmod(i, 36)
        b36 = digits[n] + b36
    return b36
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # NOTE: 'string_escape' is a Python 2-only codec (str.decode does not
    # exist on Python 3); this module predates Python 3 support.
    etags = [e.decode('string_escape') for e in etags]
    return etags
def quote_etag(etag):
    """
    Wrap a string in double quotes, escaping backslashes and any embedded
    double quotes as necessary.
    """
    escaped = etag.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    return '"%s"' % escaped
| mit |
helldorado/ansible | lib/ansible/plugins/lookup/pipe.py | 150 | 2325 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: pipe
author: Daniel Hokka Zakrisson <daniel@hozac.com>
version_added: "0.9"
short_description: read output from a command
description:
- Run a command and return the output
options:
_terms:
description: command(s) to run
required: True
notes:
- Like all lookups this runs on the Ansible controller and is unaffected by other keywords, such as become,
so if you need to different permissions you must change the command or run Ansible as another user.
- Alternatively you can use a shell/command task that runs against localhost and registers the result.
"""
EXAMPLES = """
- name: raw result of running date command"
debug: msg="{{ lookup('pipe','date') }}"
- name: Always use quote filter to make sure your variables are safe to use with shell
debug: msg="{{ lookup('pipe','getent ' + myuser|quote ) }}"
"""
RETURN = """
_string:
description:
- stdout from command
"""
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    def run(self, terms, variables, **kwargs):
        """Run each term as a shell command on the controller and return a
        list of each command's stdout, UTF-8 decoded and right-stripped.
        Raises AnsibleError if any command exits non-zero.
        """
        ret = []
        for term in terms:
            '''
            http://docs.python.org/2/library/subprocess.html#popen-constructor
            The shell argument (which defaults to False) specifies whether to use the
            shell as the program to execute. If shell is True, it is recommended to pass
            args as a string rather than as a sequence
            https://github.com/ansible/ansible/issues/6550
            '''
            term = str(term)
            # shell=True is deliberate (see note above): the term is shell
            # syntax authored in the playbook, so it must be quoted there.
            p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdout, stderr) = p.communicate()
            if p.returncode == 0:
                ret.append(stdout.decode("utf-8").rstrip())
            else:
                raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
        return ret
| gpl-3.0 |
koharjidan/dogecoin | qa/rpc-tests/smartfees.py | 6 | 12419 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many many transactions without needing to spend
# time signing.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP"
SCRIPT_SIG = ["0451025175", "0451025275"]
def satoshi_round(amount):
    """Truncate amount down to 8 decimal places (one-satoshi granularity)."""
    one_satoshi = Decimal("0.00000001")
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
    '''
    Create and send a transaction with a random fee.
    The transaction pays to a trivial P2SH script, and assumes that its inputs
    are of the same form.
    The function takes a list of confirmed outputs and unconfirmed outputs
    and attempts to use the confirmed list first for its inputs.
    It adds the newly created outputs to the unconfirmed list.
    Returns (raw transaction, fee)
    '''
    # It's best to exponentially distribute our random fees
    # because the buckets are exponentially spaced.
    # Exponentially distributed from 1-128 * fee_increment
    rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
    # Total fee ranges from min_fee to min_fee + 127*fee_increment
    fee = min_fee - fee_increment + satoshi_round(rand_fee)
    inputs = []
    total_in = Decimal("0.00000000")
    # Gather confirmed inputs first...
    while total_in <= (amount + fee) and len(conflist) > 0:
        t = conflist.pop(0)
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
    if total_in <= amount + fee:
        # ...then fall back on unconfirmed outputs from the mempool.
        while total_in <= (amount + fee) and len(unconflist) > 0:
            t = unconflist.pop(0)
            total_in += t["amount"]
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
    outputs = {}
    outputs[P2SH_1] = total_in - amount - fee
    outputs[P2SH_2] = amount
    rawtx = from_node.createrawtransaction(inputs, outputs)
    # createrawtransaction constructs a transaction that is ready to be signed.
    # These transactions don't need to be signed, but we still have to insert the ScriptSig
    # that will satisfy the ScriptPubKey.
    # Assumes each serialized input occupies 82 hex chars in the raw tx, with
    # the empty scriptSig at a fixed position — TODO confirm against the
    # transaction serialization format if inputs change shape.
    completetx = rawtx[0:10]
    inputnum = 0
    for inp in inputs:
        completetx += rawtx[10+82*inputnum:82+82*inputnum]
        completetx += SCRIPT_SIG[inp["vout"]]
        completetx += rawtx[84+82*inputnum:92+82*inputnum]
        inputnum += 1
    completetx += rawtx[10+82*inputnum:]
    txid = from_node.sendrawtransaction(completetx, True)
    # Record both new outputs as spendable-but-unconfirmed.
    unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
    unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
    return (completetx, fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
    '''
    We need to generate a lot of very small inputs so we can generate a ton of transactions
    and they will have low priority.
    This function takes an input from txins, and creates and sends a transaction
    which splits the value into 2 outputs which are appended to txouts.
    '''
    prevtxout = txins.pop()
    inputs = []
    outputs = {}
    inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] })
    # Split roughly in half, leaving a fixed 0.00001 fee for the miner.
    half_change = satoshi_round(prevtxout["amount"]/2)
    rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
    outputs[P2SH_1] = half_change
    outputs[P2SH_2] = rem_change
    rawtx = from_node.createrawtransaction(inputs, outputs)
    # If this is the initial split we actually need to sign the transaction
    # (it spends a real wallet output); otherwise we just insert the proper
    # canned ScriptSig for the P2SH puzzle being spent.
    if (initial_split) :
        completetx = from_node.signrawtransaction(rawtx)["hex"]
    else :
        completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:]
    txid = from_node.sendrawtransaction(completetx, True)
    txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
    txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
    '''
    This function calls estimatefee and verifies that the estimates
    meet certain invariants:
      - every valid estimate lies within the fee range actually observed;
      - estimates are monotonically non-increasing with confirm count;
      - no invalid (-1) estimate appears after a valid one;
      - at most max_invalid estimates are invalid.
    Returns the list of 25 estimates (confirm targets 1..25).
    '''
    all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
    if print_estimates:
        print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
    delta = 1.0e-6 # account for rounding error
    last_e = max(fees_seen)
    # estimatefee returns -1 when it has no estimate; skip those here.
    for e in filter(lambda x: x >= 0, all_estimates):
        # Estimates should be within the bounds of what transactions fees actually were:
        if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                 %(float(e), min(fees_seen), max(fees_seen)))
        # Estimates should be monotonically decreasing
        if float(e)-delta > last_e:
            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
                                 %(float(e),float(last_e)))
        last_e = e
    valid_estimate = False
    invalid_estimates = 0
    for e in all_estimates:
        if e >= 0:
            valid_estimate = True
        else:
            invalid_estimates += 1
        # Once we're at a high enough confirmation count that we can give an estimate
        # We should have estimates for all higher confirmation counts
        if valid_estimate and e < 0:
            raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
    # Check on the expected number of different confirmation counts
    # that we might not have valid estimates for
    if invalid_estimates > max_invalid:
        raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
    return all_estimates
class EstimateFeeTest(BitcoinTestFramework):
    def setup_network(self):
        '''
        We'll setup the network to have 3 nodes that all mine with different parameters.
        But first we need to use one node to create a lot of small low priority outputs
        which we will use to generate our transactions.
        '''
        self.nodes = []
        # Use node0 to mine blocks for input splitting
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
                                                              "-relaypriority=0", "-whitelist=127.0.0.1"]))
        print("This test is time consuming, please be patient")
        print("Splitting inputs to small size so we can generate low priority tx's")
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
        # Mine
        while (len(self.nodes[0].getrawmempool()) > 0):
            self.nodes[0].generate(1)
        # Repeatedly split those 2 outputs, doubling twice for each rep
        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
        # (each rep roughly quadruples the output count: ~2 * 4^5 outputs total)
        reps = 0
        while (reps < 5):
            #Double txouts to txouts2
            while (len(self.txouts)>0):
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            #Double txouts2 to txouts
            while (len(self.txouts2)>0):
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            reps += 1
        print("Finished splitting")
        # Now we can connect the other nodes, didn't want to connect them earlier
        # so the estimates would not be affected by the splitting transactions
        # Node1 mines small blocks but that are bigger than the expected transaction rate,
        # and allows free transactions.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # (17k is room enough for 110 or so transactions)
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-blockprioritysize=1500", "-blockmaxsize=18000",
                                      "-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
        connect_nodes(self.nodes[1], 0)
        # Node2 is a stingy miner, that
        # produces too small blocks (room for only 70 or so transactions)
        node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]
        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)
        self.is_network_split = False
        self.sync_all()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex)/2)/1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3],.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3],.1)
#update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
print("Checking estimates for 1/2/3/6/15/25 blocks")
print("Creating transactions and mining them with a huge block size")
# Create transactions and mine 20 big blocks with node 0 such that the mempool is always emptied
self.transact_and_mine(60, self.nodes[0])
check_estimates(self.nodes[1], self.fees_per_kb, 1)
print("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 30 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(20, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 3)
print("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 40 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(40, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3],.1)
print("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    EstimateFeeTest().main()
| mit |
i5o/sugar-toolkit-gtk3 | examples/radiotoolbutton.py | 2 | 1488 | from gi.repository import Gtk
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.radiopalette import RadioPalette, RadioMenuButton
from sugar3.graphics.xocolor import XoColor
from common import set_theme
# Apply the Sugar GTK theme before any widgets are created.
set_theme()

# Top-level demo window; closing it terminates the GTK main loop.
window = Gtk.Window()
window.show()
window.connect("delete-event", Gtk.main_quit)

# Horizontal container that will host the radio menu button built below.
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
window.add(box)
box.show()
def echo(button, label):
    """Print *label* when *button* has just been activated.

    'toggled' handlers fire for both activation and deactivation; the
    deactivation half of each toggle is ignored so every selection is
    reported exactly once.
    """
    if not button.props.active:
        return
    # print() with a single argument behaves identically on Python 2 and 3;
    # the old "print label" statement form is a SyntaxError under Python 3.
    print(label)
# Palette that groups the radio buttons under a single drop-down menu.
palette = RadioPalette()

# Adding 3 RadioToolButtons to a palette
button1 = RadioToolButton(icon_name='document-save', accelerator="<ctrl>S",
                          xo_color=XoColor("white"))
button1.connect('toggled', lambda button: echo(button, 'document-save'))
palette.append(button1, 'menu.document-save')

# button2 and button3 join button1's radio group, so at most one of the
# three can be active at any time.
button2 = RadioToolButton(icon_name='document-open', accelerator="<ctrl>O",
                          xo_color=XoColor("white"), group=button1)
button2.connect('toggled', lambda button: echo(button, 'document-open'))
palette.append(button2, 'menu.document-open')

button3 = RadioToolButton(icon_name='document-send', accelerator="<ctrl>F",
                          xo_color=XoColor("white"), group=button1)
button3.connect('toggled', lambda button: echo(button, 'document-send'))
palette.append(button3, 'menu.document-send')

# Toolbar button that exposes the palette as a drop-down menu.
button = RadioMenuButton(palette=palette)
box.pack_start(button, False, False, 1)
button.show()

if __name__ == '__main__':
    Gtk.main()
| lgpl-2.1 |
cchurch/ansible | lib/ansible/plugins/lookup/nios_next_network.py | 77 | 4174 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
lookup: nios_next_network
version_added: "2.7"
short_description: Return the next available network range for a network-container
description:
- Uses the Infoblox WAPI API to return the next available network addresses for
a given network CIDR
requirements:
- infoblox_client
extends_documentation_fragment: nios
options:
_terms:
description: The CIDR network to retrieve the next network from next available network within the specified
container.
required: True
cidr:
description:
        - The CIDR prefix length of the network(s) to retrieve from the next available space within the
          specified container. The requested CIDR must be specified and greater than the parent container's CIDR.
required: True
default: 24
num:
description: The number of network addresses to return from network-container
required: false
default: 1
exclude:
description: Network addresses returned from network-container excluding list of user's input network range
required: false
default: ''
"""
EXAMPLES = """
- name: return next available network for network-container 192.168.10.0/24
set_fact:
networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
- name: return the next 2 available network addresses for network-container 192.168.10.0/24
set_fact:
networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, num=2,
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
set_fact:
networkaddr: "{{ lookup('nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
"""
RETURN = """
_list:
description:
- The list of next network addresses available
returned: always
type: list
"""
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.net_tools.nios.api import WapiLookup
from ansible.module_utils._text import to_text
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
    """Return the next available network range(s) from an Infoblox
    network container via the WAPI ``next_available_network`` function."""

    def run(self, terms, variables=None, **kwargs):
        """Look up the next available network(s).

        terms[0] must be the parent container CIDR (A.B.C.D/E); supported
        kwargs are ``cidr`` (prefix length, default 24), ``num`` (count,
        default 1), ``exclude`` (list of ranges to skip) and ``provider``
        (connection parameters). Raises AnsibleError on any failure.
        """
        try:
            network = terms[0]
        except IndexError:
            raise AnsibleError('missing network argument in the form of A.B.C.D/E')

        # dict.get() never raises IndexError, so the former try/except
        # around this lookup was dead code; the default of 24 already
        # covers a missing 'cidr' argument.
        cidr = kwargs.get('cidr', 24)
        provider = kwargs.pop('provider', {})
        wapi = WapiLookup(provider)

        network_obj = wapi.get_object('networkcontainer', {'network': network})
        if network_obj is None:
            raise AnsibleError('unable to find network-container object %s' % network)

        num = kwargs.get('num', 1)
        exclude_ip = kwargs.get('exclude', [])

        try:
            ref = network_obj[0]['_ref']
            avail_nets = wapi.call_func('next_available_network', ref,
                                        {'cidr': cidr, 'num': num, 'exclude': exclude_ip})
            return [avail_nets['networks']]
        except Exception as exc:
            # Normalize any WAPI/transport failure into an AnsibleError.
            raise AnsibleError(to_text(exc))
| gpl-3.0 |
Nicop06/ansible | lib/ansible/modules/cloud/amazon/iam_policy.py | 19 | 14055 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_policy
short_description: Manage IAM policies for users, groups, and roles
description:
- Allows uploading or removing IAM policies for IAM users, groups or roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
iam_name:
description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true
policy_name:
description:
- The name label for the policy to create or remove.
required: true
policy_document:
description:
- The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
required: false
policy_json:
description:
- A properly json formatted policy as string (mutually exclusive with C(policy_document),
see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false
state:
description:
- Whether to create or delete the IAM policy.
required: true
default: null
choices: [ "present", "absent"]
skip_duplicates:
description:
- By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with
the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
required: false
    default: "true"
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a policy with the name of 'Admin' to the group 'administrators'
tasks:
- name: Assign a policy called Admin to the administrators group
iam_policy:
iam_type: group
iam_name: administrators
policy_name: Admin
state: present
policy_document: admin_policy.json
# Advanced example, create two new groups and add a READ-ONLY policy to both
# groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name: Apply READ-ONLY policy to new groups that have been recently created
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
with_items: "{{ new_groups.results }}"
# Create a new S3 policy with prefix per user
tasks:
- name: Create S3 policy from template
iam_policy:
iam_type: user
iam_name: "{{ item.user }}"
policy_name: "s3_limited_access_{{ item.prefix }}"
state: present
policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
with_items:
- user: s3_user
prefix: s3_user_prefix
'''
import json
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info, boto_exception
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import urllib
def user_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create/update or delete an inline policy on an IAM user.

    module: AnsibleModule used for exit_json/fail_json.
    iam: boto IAM connection.
    name: IAM user name.
    policy_name: label of the inline policy to manage.
    skip: when True, do not create a duplicate of an existing document.
    pdoc: JSON policy document string (or None).
    state: 'present' or 'absent'.
    Returns (changed, name, updated_policies).
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            '''
            urllib is needed here because boto returns url encoded strings instead
            '''
            if urllib.parse.unquote(iam.get_user_policy(name, pol).
                                    get_user_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_user_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_user_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
                # NOTE(review): unlike role_action, delete errors other than
                # "cannot be found." are silently swallowed here — confirm intended.
        updated_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies
def role_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create/update or delete an inline policy on an IAM role.

    Same contract as user_action, but exits early (changed=False) when the
    role itself does not exist, and fails hard on unexpected delete errors.
    Returns (changed, name, updated_policies).
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as e:
        if e.error_code == "NoSuchEntity":
            # Role doesn't exist so it's safe to assume the policy doesn't either
            module.exit_json(changed=False, msg="No such role, policy will be skipped.")
        else:
            module.fail_json(msg=e.message)

    try:
        matching_policies = []
        for pol in current_policies:
            # boto returns url-encoded policy documents; decode before comparing.
            if urllib.parse.unquote(iam.get_role_policy(name, pol).
                                    get_role_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_role_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_role_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                else:
                    module.fail_json(msg=err.message)

        updated_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies
def group_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create/update or delete an inline policy on an IAM group.

    Same contract as user_action; additionally returns a message noting a
    pre-existing identical policy document, if one was found.
    Returns (changed, name, updated_policies, msg).
    """
    policy_match = False
    changed = False
    msg=''
    try:
        current_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            # boto returns url-encoded policy documents; decode before comparing.
            if urllib.parse.unquote(iam.get_group_policy(name, pol).
                                    get_group_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
                msg=("The policy document you specified already exists "
                     "under the name %s." % pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_group_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_group_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                # NOTE(review): unlike role_action, delete errors other than
                # "cannot be found." are silently swallowed here — confirm intended.
        updated_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies, msg
def main():
    """Entry point: parse module arguments, load the policy document and
    dispatch to the user/role/group action handler."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        iam_type=dict(
            default=None, required=True, choices=['user', 'group', 'role']),
        state=dict(
            default=None, required=True, choices=['present', 'absent']),
        iam_name=dict(default=None, required=False),
        policy_name=dict(default=None, required=True),
        policy_document=dict(default=None, required=False),
        policy_json=dict(type='json', default=None, required=False),
        skip_duplicates=dict(type='bool', default=True, required=False)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state').lower()
    iam_type = module.params.get('iam_type').lower()
    # Bug fix: a second, un-lowercased `state = module.params.get('state')`
    # used to follow here, silently discarding the .lower() above; it has
    # been removed (choices= already restricts the accepted values).
    name = module.params.get('iam_name')
    policy_name = module.params.get('policy_name')
    skip = module.params.get('skip_duplicates')

    if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
        module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')

    if module.params.get('policy_document') is not None:
        # Round-trip through json to both validate and normalize the document.
        # The `with` block closes the file; no explicit close() is needed.
        with open(module.params.get('policy_document'), 'r') as json_data:
            pdoc = json.dumps(json.load(json_data))
    elif module.params.get('policy_json') is not None:
        pdoc = module.params.get('policy_json')
        # if its a string, assume it is already JSON
        if not isinstance(pdoc, string_types):
            try:
                pdoc = json.dumps(pdoc)
            except Exception as e:
                module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
    else:
        pdoc = None

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    changed = False

    if iam_type == 'user':
        changed, user_name, current_policies = user_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, user_name=name, policies=current_policies)
    elif iam_type == 'role':
        changed, role_name, current_policies = role_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, role_name=name, policies=current_policies)
    elif iam_type == 'group':
        changed, group_name, current_policies, msg = group_action(module, iam, name,
                                                                  policy_name, skip, pdoc,
                                                                  state)
        module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)


if __name__ == '__main__':
    main()
| gpl-3.0 |
iocast/poiservice | lib/FilterEncodingWizard.py | 1 | 2742 | '''
Created on May 16, 2011
@author: michel
'''
import json
class FilterEncodingWizard(object):
    """Helpers for building OGC Filter Encoding query UIs.

    Exposes the supported comparison and logical operators both as JSON
    (for client-side scripting) and as ready-made HTML <select> widgets.
    """

    # Supported comparison operators: ``value`` is the OGC element name,
    # ``display`` the label shown to the user and ``xml`` the serialization
    # template. (Attribute name keeps the historical spelling for callers.)
    comparision = [{
        'value' : 'PropertyIsEqualTo',
        'display' : '=',
        'xml' : '<PropertyIsEqualTo><PropertyName>${value}</PropertyName><Literal>${literal}</Literal></PropertyIsEqualTo>'},
        {'value' : 'PropertyIsNotEqualTo',
         'display' : '!=',
         'xml' : '<PropertyIsNotEqualTo><PropertyName>${value}</PropertyName><Literal>${literal}</Literal></PropertyIsNotEqualTo>'},
        {'value' : 'PropertyIsLessThan',
         'display' : '<',
         'xml' : '<PropertyIsLessThan><PropertyName>${value}</PropertyName><Literal>${literal}</Literal></PropertyIsLessThan>'},
        {'value' : 'PropertyIsGreaterThan',
         'display' : '>',
         'xml' : '<PropertyIsGreaterThan><PropertyName>${value}</PropertyName><Literal>${literal}</Literal></PropertyIsGreaterThan>'},
        {'value' : 'PropertyIsLessThanOrEqualTo',
         'display' : '<=',
         'xml' : '<PropertyIsLessThanOrEqualTo><PropertyName>${value}</PropertyName><Literal>${literal}</Literal></PropertyIsLessThanOrEqualTo>'},
        {'value' : 'PropertyIsGreaterThanOrEqualTo',
         'display' : '>=',
         'xml' : '<PropertyIsGreaterThanOrEqualTo><PropertyName>${value}</PropertyName><Literal>${literal}</Literal></PropertyIsGreaterThanOrEqualTo>'}
        #{'value' : 'PropertyIsLike',
        # 'display' : 'Like',
        # 'xml' : ''},
        #{'value' : 'PropertyIsBetween',
        # 'display' : 'Between',
        # 'xml' : ''},
        #{'value' : 'PropertyIsNull',
        # 'display' : 'Nul',
        # 'xml' : ''}
    ]

    # Logical operators used to combine comparison statements.
    logical = [
        {'value' : 'Or',
         'display' : 'or',
         'xml' : '<Or>${statement}</Or>'},
        {
         'value' : 'And',
         'display' : 'and',
         'xml' : '<And>${statement}</And>'}
    ]

    @staticmethod
    def _options_html(values, select_tag='<select>'):
        """Render *values* as an HTML select with one <option> per entry.

        Shared by comparisonToHTML/logicalToHTML, which previously
        duplicated this loop.
        """
        html = select_tag
        for value in values:
            html += '<option value="' + value['value'] + '">' + value['display'] + '</option>'
        html += '</select>'
        return html

    def comparisonToJson(self):
        """Return the comparison operators serialized as JSON."""
        return json.dumps(self.comparision)

    def comparisonToHTML(self):
        """Return the comparison operators as a <select> element that
        notifies the client-side query builder on change."""
        return self._options_html(
            self.comparision,
            '<select onChange="javascript:queryBuilder.operatorChanged(this);">')

    def logicalToJson(self):
        """Return the logical operators serialized as JSON."""
        return json.dumps(self.logical)

    def logicalToHTML(self):
        """Return the logical operators as a plain <select> element."""
        return self._options_html(self.logical)
| mit |
triggerNZ/illacceptanything | code/recursive_cmp.py | 12 | 1262 | #!/usr/bin/env python3
# When True, also run `cmp` on the contents of files present in both trees.
CHECK_CONTENT = False

import os
import sys
import subprocess

# Shorthand for os.path.join used by the directory walker below.
join = os.path.join
def compare_dirs(dir1, dir2):
    """Report entries present in only one of the two directories, then
    recurse (via compare) into every entry the directories share."""
    entries1 = set(os.listdir(dir1))
    entries2 = set(os.listdir(dir2))
    if entries1 != entries2:
        print('“%s” and “%s” differ:' % (dir1, dir2))
        print('\tfirst has: %s' % (entries1 - entries2))
        print('\tsecond has: %s' % (entries2 - entries1))
    for name in entries1 & entries2:
        compare(os.path.join(dir1, name), os.path.join(dir2, name))
def compare_files(f1, f2):
    """Compare the contents of two files with `cmp`, but only when
    content checking is enabled via the module-level CHECK_CONTENT flag."""
    if not CHECK_CONTENT:
        return
    subprocess.call(['cmp', f1, f2])
def compare(dir1, dir2):
    """Dispatch to the appropriate comparison for two filesystem paths:
    directories are walked, files are byte-compared, and mismatched or
    nonexistent paths are reported."""
    both_dirs = os.path.isdir(dir1) and os.path.isdir(dir2)
    both_files = os.path.isfile(dir1) and os.path.isfile(dir2)
    if both_dirs:
        return compare_dirs(dir1, dir2)
    if both_files:
        return compare_files(dir1, dir2)
    neither1 = not os.path.isdir(dir1) and not os.path.isfile(dir1)
    neither2 = not os.path.isdir(dir2) and not os.path.isfile(dir2)
    if neither1 and neither2:
        print('Skipping “%s” and “%s”: neither file or dir.' % (dir1, dir2))
    else:
        print('“%s” and “%s” are not the same type (dir/file).' % (dir1, dir2))
if __name__ == '__main__':
    # Require exactly two path arguments on the command line.
    if len(sys.argv) != 3:
        print('Syntax: %s dir1 dir2' % sys.argv[0])
        exit(1)
    (prog, dir1, dir2) = sys.argv
    compare(dir1, dir2)
| mit |
wagnerand/addons-server | src/olympia/zadmin/admin.py | 4 | 2189 | from django.conf import settings
from django.contrib import admin, auth
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from django.utils.html import format_html
from django.urls import reverse
from olympia.accounts.utils import redirect_for_login
from . import models
def related_content_link(
    obj, related_class, related_field, related_manager='objects', text=None
):
    """
    Return a link to the admin changelist for the instances of related_class
    linked to the object.

    obj: the instance whose related objects are linked.
    related_class: model class of the related objects.
    related_field: field name on related_class pointing back at obj.
    related_manager: manager name used when counting (default 'objects').
    text: link text; when None, the count of related objects is used.
    """
    url = 'admin:{}_{}_changelist'.format(
        related_class._meta.app_label, related_class._meta.model_name
    )
    if text is None:
        # Count the related objects only when no explicit link text is given.
        qs = getattr(related_class, related_manager).filter(**{related_field: obj})
        text = qs.count()
    # format_html escapes interpolated values, keeping the link XSS-safe.
    return format_html(
        '<a href="{}?{}={}">{}</a>', reverse(url), related_field, obj.pk, text
    )
def related_single_content_link(obj, related_field):
    """
    Return a link to the admin change page for the instance referenced by
    ``obj.<related_field>``, or an empty string when nothing is linked.
    """
    instance = getattr(obj, related_field)
    if not instance:
        return ''
    model_meta = instance._meta.model._meta
    url_name = 'admin:{}_{}_change'.format(model_meta.app_label, model_meta.model_name)
    return format_html(
        '<a href="{}">{}</a>', reverse(url_name, args=(instance.pk,)), repr(instance)
    )
# Hijack the admin's login to use our pages.
def login(request):
    """Replacement for the Django admin login view.

    Authorized users are redirected to ?next= (or the admin index),
    authenticated users without admin access get a 403, and anonymous
    users are sent through the site's own login flow.
    """
    # if the user has permission, just send them to the index page
    if request.method == 'GET' and admin.site.has_permission(request):
        next_path = request.GET.get(auth.REDIRECT_FIELD_NAME)
        return redirect(next_path or 'admin:index')
    # otherwise, they're logged in but don't have permission return a 403.
    elif request.user.is_authenticated:
        raise PermissionDenied
    else:
        return redirect_for_login(request)
# Global admin-site configuration: register models, disable the risky
# bulk-delete action site-wide, brand the AMO admin and install the
# custom login view defined above.
admin.site.register(models.Config)
admin.site.disable_action('delete_selected')
admin.site.site_url = settings.EXTERNAL_SITE_URL
admin.site.site_header = admin.site.index_title = 'AMO Administration'
admin.site.login = login
| bsd-3-clause |
lepistone/odoo | addons/website_forum/models/res_users.py | 21 | 2538 | # -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class Users(osv.Model):
    """Extend res.users with forum karma and per-level gamification
    badge counters used by the website forum."""
    _inherit = 'res.users'

    def __init__(self, pool, cr):
        # Let users edit their public profile fields from the website in
        # addition to the fields already writable on their own record.
        init_res = super(Users, self).__init__(pool, cr)
        self.SELF_WRITEABLE_FIELDS = list(set(
            self.SELF_WRITEABLE_FIELDS + \
            ['country_id', 'city', 'website', 'website_description']))
        return init_res

    def _get_user_badge_level(self, cr, uid, ids, name, args, context=None):
        """Return total badge per level of users"""
        result = dict.fromkeys(ids, False)
        badge_user_obj = self.pool['gamification.badge.user']
        for id in ids:
            # One count query per level; presumably fine for the small id
            # lists this is called with.
            result[id] = {
                'gold_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'gold'), ('user_id', '=', id)], context=context, count=True),
                'silver_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'silver'), ('user_id', '=', id)], context=context, count=True),
                'bronze_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'bronze'), ('user_id', '=', id)], context=context, count=True),
            }
        return result

    _columns = {
        'create_date': fields.datetime('Create Date', select=True, readonly=True),
        'karma': fields.integer('Karma'),
        'badge_ids': fields.one2many('gamification.badge.user', 'user_id', 'Badges'),
        'gold_badge': fields.function(_get_user_badge_level, string="Number of gold badges", type='integer', multi='badge_level'),
        'silver_badge': fields.function(_get_user_badge_level, string="Number of silver badges", type='integer', multi='badge_level'),
        'bronze_badge': fields.function(_get_user_badge_level, string="Number of bronze badges", type='integer', multi='badge_level'),
    }

    _defaults = {
        'karma': 0,
    }

    def add_karma(self, cr, uid, ids, karma, context=None):
        """Add (possibly negative) karma points to each user in ids."""
        for user in self.browse(cr, uid, ids, context=context):
            self.write(cr, uid, [user.id], {'karma': user.karma + karma}, context=context)
        return True

    def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
        """Exclude the 'forum' gamification category from the standard
        summary, since forum goals are surfaced elsewhere."""
        if isinstance(excluded_categories, list):
            if 'forum' not in excluded_categories:
                excluded_categories.append('forum')
        else:
            excluded_categories = ['forum']
        return super(Users, self).get_serialised_gamification_summary(cr, uid, excluded_categories=excluded_categories, context=context)
| agpl-3.0 |
takeshineshiro/cinder | cinder/tests/unit/api/common.py | 44 | 1204 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def compare_links(actual, expected):
    """Compare xml atom links."""
    return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))


def compare_media_types(actual, expected):
    """Compare xml media types."""
    return compare_tree_to_dict(actual, expected, ('base', 'type'))


def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts."""
    paired = zip(actual, expected)
    return all(
        elem.get(key) == data.get(key)
        for elem, data in paired
        for key in keys
    )
| apache-2.0 |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/webpagereplay/rules_parser.py | 30 | 5179 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Rules parser.
The input syntax is:
[{"comment": ignored_value},
{"rule_class_name1": {"arg1": value, "arg2": value, ...}},
{"rule_class_name2": {"arg1": value, "arg2": value, ...}},
...]
E.g.:
[{"comment": "this text is ignored"},
{"SendStatus": {"url": "example\\.com/ss.*", "status": 204}},
{"ModifyUrl": {"url": "(example\\.com)(/.*)", "new_url": "{1}"}}
]
"""
import json
import re
class Error(Exception):
pass
class Rules(object):
  """A parsed sequence of Rule objects."""

  def __init__(self, file_obj=None, allowed_imports=None):
    """Initializes from the given file object.

    Args:
      file_obj: A file object; when None an empty rule sequence is created.
      allowed_imports: A set of strings, defaults to {'rules'}.
        Use {'*'} to allow any import path.
    """
    if allowed_imports is None:
      allowed_imports = {'rules'}
    # _Load parses the JSON rule descriptions and instantiates rule objects.
    self._rules = [] if file_obj is None else _Load(file_obj, allowed_imports)

  def Contains(self, rule_type_name):
    """Returns true if any rule matches the given type name.

    Args:
      rule_type_name: a string.
    Returns:
      True if any rule matches, else False.
    """
    return any(rule for rule in self._rules if rule.IsType(rule_type_name))

  def Find(self, rule_type_name):
    """Returns a _Rule object containing all rules with the given type name.

    Args:
      rule_type_name: a string.
    Returns:
      A callable object that expects two arguments:
        request: the httparchive ArchivedHttpRequest
        response: the httparchive ArchivedHttpResponse
      and returns the rule return_value of the first rule that returns
      should_stop == True, or the last rule's return_value if all rules returns
      should_stop == False.
    """
    matches = [rule for rule in self._rules if rule.IsType(rule_type_name)]
    return _Rule(matches)

  def __str__(self):
    return _ToString(self._rules)

  def __repr__(self):
    return str(self)
class _Rule(object):
  """Calls a sequence of Rule objects until one returns should_stop."""

  def __init__(self, rules):
    # rules: list of objects exposing ApplyRule(return_value, request, response).
    self._rules = rules

  def __call__(self, request, response):
    """Calls the rules until one returns should_stop.

    Args:
      request: the httparchive ArchivedHttpRequest.
      response: the httparchive ArchivedHttpResponse, which may be None.
    Returns:
      The rule return_value of the first rule that returns should_stop == True,
      or the last rule's return_value if all rules return should_stop == False.
    """
    return_value = None
    for rule in self._rules:
      # Each rule sees the running return_value and may replace it.
      should_stop, return_value = rule.ApplyRule(
          return_value, request, response)
      if should_stop:
        break
    return return_value

  def __str__(self):
    return _ToString(self._rules)

  def __repr__(self):
    return str(self)
def _ToString(rules):
"""Formats a sequence of Rule objects into a string."""
return '[\n%s\n]' % '\n'.join('%s' % rule for rule in rules)
def _Load(file_obj, allowed_imports):
"""Parses and evaluates all rules in the given file.
Args:
file_obj: a file object.
allowed_imports: a sequence of strings, e.g.: {'rules'}.
Returns:
a list of rules.
"""
rules = []
entries = json.load(file_obj)
if not isinstance(entries, list):
raise Error('Expecting a list, not %s', type(entries))
for i, entry in enumerate(entries):
if not isinstance(entry, dict):
raise Error('%s: Expecting a dict, not %s', i, type(entry))
if len(entry) != 1:
raise Error('%s: Expecting 1 item, not %d', i, len(entry))
name, args = next(entry.iteritems())
if not isinstance(name, basestring):
raise Error('%s: Expecting a string TYPE, not %s', i, type(name))
if not re.match(r'(\w+\.)*\w+$', name):
raise Error('%s: Expecting a classname TYPE, not %s', i, name)
if name == 'comment':
continue
if not isinstance(args, dict):
raise Error('%s: Expecting a dict ARGS, not %s', i, type(args))
fullname = str(name)
if '.' not in fullname:
fullname = 'rules.%s' % fullname
modulename, classname = fullname.rsplit('.', 1)
if '*' not in allowed_imports and modulename not in allowed_imports:
raise Error('%s: Package %r is not in allowed_imports', i, modulename)
module = __import__(modulename, fromlist=[classname])
clazz = getattr(module, classname)
missing = {s for s in ('IsType', 'ApplyRule') if not hasattr(clazz, s)}
if missing:
raise Error('%s: %s lacks %s', i, clazz.__name__, ' and '.join(missing))
rule = clazz(**args)
rules.append(rule)
return rules
| bsd-3-clause |
akarki15/mozillians | vendor-local/lib/python/tablib/packages/openpyxl/shared/exc.py | 118 | 2259 | # file openpyxl/shared/exc.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Definitions for openpyxl shared exception classes."""
class CellCoordinatesException(Exception):
    """Error for converting between numeric and A1-style cell references."""
class ColumnStringIndexException(Exception):
    """Error for bad column names in A1-style cell references."""
class DataTypeException(Exception):
    """Error for any data type inconsistencies."""
class NamedRangeException(Exception):
    """Error for badly formatted named ranges."""
class SheetTitleException(Exception):
    """Error for bad sheet names."""
class InsufficientCoordinatesException(Exception):
    """Error for partially specified cell coordinates."""
class OpenModeError(Exception):
    """Error for fileobj opened in non-binary mode."""
class InvalidFileException(Exception):
    """Error for trying to open a non-ooxml file."""
class ReadOnlyWorkbookException(Exception):
    """Error for trying to modify a read-only workbook."""
class MissingNumberFormat(Exception):
    """Error when a referenced number format is not in the stylesheet."""
| bsd-3-clause |
tanglu-org/tgl-misago | misago/apps/admin/bans/views.py | 3 | 5489 | from django.core.urlresolvers import reverse as django_reverse
from django.db.models import Q
from django.template import RequestContext
from django.utils.translation import ugettext as _
from misago import messages
from misago.admin import site
from misago.apps.admin.widgets import *
from misago.messages import Message
from misago.models import Ban
from misago.monitor import monitor, UpdatingMonitor
from misago.apps.admin.bans.forms import BanForm, SearchBansForm
def reverse(route, target=None):
    """Resolves an admin route name to a URL.

    When a truthy `target` is given, its primary key is passed to Django's
    URL resolver as the `target` keyword argument.
    """
    if not target:
        return django_reverse(route)
    return django_reverse(route, kwargs={'target': target.pk})
"""
Views
"""
class List(ListWidget):
    """Admin widget listing bans with search, sorting and mass lifting."""
    admin = site.get_action('bans')
    id = 'list'
    columns = (
        ('ban', _("Ban"), 50),
        ('expires', _("Expires")),
    )
    default_sorting = 'expires'
    sortables = {
        'ban': 1,
        'expires': 0,
    }
    pagination = 20
    search_form = SearchBansForm
    empty_message = _('No bans are currently set.')
    empty_search_message = _('No bans have been found.')
    nothing_checked_message = _('You have to check at least one ban.')
    actions = (
        ('delete', _("Lift selected bans"), _("Are you sure you want to lift selected bans?")),
    )

    def set_filters(self, model, filters):
        """Applies the search-form filters to the bans queryset."""
        if 'ban' in filters:
            model = model.filter(ban__contains=filters['ban'])
        if 'reason' in filters:
            # A reason may live in either the user-visible or the
            # admin-only reason field; match against both.
            model = model.filter(Q(reason_user__contains=filters['reason']) | Q(reason_admin__contains=filters['reason']))
        if 'test' in filters:
            model = model.filter(test__in=filters['test'])
        return model

    def get_item_actions(self, item):
        """Returns the per-row edit/lift actions for a ban."""
        return (
            self.action('pencil', _("Edit Ban"), reverse('admin_bans_edit', item)),
            self.action('remove', _("Lift Ban"), reverse('admin_bans_delete', item), post=True, prompt=_("Are you sure you want to lift this ban?")),
        )

    def action_delete(self, items, checked):
        """Mass action: lifts all checked bans and bumps the bans version."""
        Ban.objects.filter(id__in=checked).delete()
        # The context-manager value was bound to an unused `cm`; the
        # manager only brackets the monitor update.
        with UpdatingMonitor():
            monitor.increase('bans_version')
        return Message(_('Selected bans have been lifted successfully.'), messages.SUCCESS), reverse('admin_bans')
class New(FormWidget):
    """Admin widget for setting a new ban."""
    admin = site.get_action('bans')
    id = 'new'
    fallback = 'admin_bans'
    form = BanForm
    submit_button = _("Set Ban")

    def get_new_link(self, model):
        """URL for creating another ban."""
        return reverse('admin_bans_new')

    def get_edit_link(self, model):
        """URL for editing the ban that was just created."""
        return reverse('admin_bans_edit', model)

    def submit_form(self, form, target):
        """Creates a Ban from validated form data and bumps the bans version.

        Returns a (ban, message) pair for the widget framework.
        """
        new_ban = Ban(
            test=form.cleaned_data['test'],
            ban=form.cleaned_data['ban'],
            reason_user=form.cleaned_data['reason_user'],
            reason_admin=form.cleaned_data['reason_admin'],
            expires=form.cleaned_data['expires']
        )
        new_ban.save(force_insert=True)
        # `as cm` was unused; the manager only brackets the monitor update.
        with UpdatingMonitor():
            monitor.increase('bans_version')
        return new_ban, Message(_('New Ban has been set.'), messages.SUCCESS)
class Edit(FormWidget):
    """Admin widget for editing an existing ban."""
    admin = site.get_action('bans')
    id = 'edit'
    name = _("Edit Ban")
    fallback = 'admin_bans'
    form = BanForm
    target_name = 'ban'
    notfound_message = _('Requested Ban could not be found.')
    submit_fallback = True

    def get_link(self, model):
        """URL of this ban's edit view."""
        return reverse('admin_bans_edit', model)

    def get_edit_link(self, model):
        return self.get_link(model)

    def get_initial_data(self, model):
        """Seeds the form with the ban's current field values."""
        return {
            'test': model.test,
            'ban': model.ban,
            'reason_user': model.reason_user,
            'reason_admin': model.reason_admin,
            'expires': model.expires,
        }

    def submit_form(self, form, target):
        """Writes validated form data back onto the ban and saves it."""
        target.test = form.cleaned_data['test']
        target.ban = form.cleaned_data['ban']
        target.reason_user = form.cleaned_data['reason_user']
        target.reason_admin = form.cleaned_data['reason_admin']
        target.expires = form.cleaned_data['expires']
        target.save(force_update=True)
        # `as cm` was unused; the manager only brackets the monitor update.
        with UpdatingMonitor():
            monitor.increase('bans_version')
        return target, Message(_('Changes in ban have been saved.'), messages.SUCCESS)
class Delete(ButtonWidget):
    """Admin widget for lifting a single ban."""
    admin = site.get_action('bans')
    id = 'delete'
    fallback = 'admin_bans'
    notfound_message = _('Requested Ban could not be found.')

    def action(self, target):
        """Deletes the ban and reports which kind of ban was lifted.

        The message is chosen by `target.test` — per the strings below:
        0 = e-mail and username, 1 = username, 2 = e-mail, 3 = IP.
        NOTE(review): any other value falls through and implicitly
        returns None — confirm the Ban model only uses 0..3.
        """
        target.delete()
        # `as cm` was unused; the manager only brackets the monitor update.
        with UpdatingMonitor():
            monitor.increase('bans_version')
        if target.test == 0:
            return Message(_('E-mail and username Ban "%(ban)s" has been lifted.') % {'ban': target.ban}, messages.SUCCESS), False
        if target.test == 1:
            return Message(_('Username Ban "%(ban)s" has been lifted.') % {'ban': target.ban}, messages.SUCCESS), False
        if target.test == 2:
            return Message(_('E-mail Ban "%(ban)s" has been lifted.') % {'ban': target.ban}, messages.SUCCESS), False
        if target.test == 3:
            return Message(_('IP Ban "%(ban)s" has been lifted.') % {'ban': target.ban}, messages.SUCCESS), False
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.