repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
liangazhou/django-rdp | packages/eclipse/plugins/org.python.pydev_4.4.0.201510052309/pysrc/_pydev_imps/_pydev_pluginbase.py | 52 | 13955 | # -*- coding: utf-8 -*-
"""
pluginbase
~~~~~~~~~~
Pluginbase is a module for Python that provides a system for building
plugin based applications.
:copyright: (c) Copyright 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from pydevd_constants import IS_PY24, IS_PY3K, IS_JYTHON
if IS_PY24:
from _pydev_imps._pydev_uuid_old import uuid4
else:
from uuid import uuid4
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
import errno
try:
from hashlib import md5
except ImportError:
from md5 import md5
import threading
from types import ModuleType
from weakref import ref as weakref
# True when running under Python 2; selects the text/bytes compat aliases below.
PY2 = sys.version_info[0] == 2
if PY2:
    text_type = unicode
    string_types = (unicode, str)
    from cStringIO import StringIO as NativeBytesIO
else:
    text_type = str
    string_types = (str,)
    from io import BytesIO as NativeBytesIO

# Thread-local stack of currently active plugin sources (pushed/popped by
# PluginSource.__enter__/__exit__ and load_plugin).
_local = threading.local()

# Dummy package under which every plugin source mounts its internal module.
_internalspace = ModuleType(__name__ + '._internalspace')
_internalspace.__path__ = []
sys.modules[_internalspace.__name__] = _internalspace
def get_plugin_source(module=None, stacklevel=None):
    """Returns the :class:`PluginSource` for the current module or the given
    module.  The module can be provided by name (in which case an import
    will be attempted) or as a module object.

    If no plugin source can be discovered, the return value from this method
    is `None`.

    This function can be very useful if additional data has been attached
    to the plugin source.  For instance this could allow plugins to get
    access to a back reference to the application that created them.

    :param module: optionally the module to locate the plugin source of.
    :param stacklevel: defines how many levels up the module should search
                       for before it discovers the plugin frame.  The
                       default is 0.  This can be useful for writing wrappers
                       around this function.
    """
    if module is None:
        # No module given: inspect the caller's frame (plus optional extra
        # levels) to find the module that invoked us.
        frm = sys._getframe((stacklevel or 0) + 1)
        name = frm.f_globals['__name__']
        glob = frm.f_globals
    elif isinstance(module, string_types):
        # Module given by name: import it using the caller's globals.
        frm = sys._getframe(1)
        name = module
        glob = __import__(module, frm.f_globals,
                          frm.f_locals, ['__dict__']).__dict__
    else:
        # Module object given directly.
        name = module.__name__
        glob = module.__dict__
    return _discover_space(name, glob)
def _discover_space(name, globals):
    """Locate the :class:`PluginSource` responsible for module *name*.

    Resolution order: the innermost source on the thread-local stack, then
    an explicit ``__pluginbase_state__`` in *globals*, then the internal
    space module the name is mounted under.  Falls through (returns None)
    when no source can be found.
    """
    try:
        return _local.space_stack[-1]
    except (AttributeError, IndexError):
        pass
    if '__pluginbase_state__' in globals:
        return globals['__pluginbase_state__'].source
    mod_name = None
    if globals:
        # in unidecode package they pass [] as globals arg
        mod_name = globals.get('__name__')
    if mod_name is not None and \
       mod_name.startswith(_internalspace.__name__ + '.'):
        # Strip the plugin's own name to get the space module's name.
        # NOTE(review): if mod_name has no dot after the space id, find()
        # returns -1 and the slice drops the last character - presumably
        # that path is unreachable here; confirm.
        end = mod_name.find('.', len(_internalspace.__name__) + 1)
        space = sys.modules.get(mod_name[:end])
        if space is not None:
            return space.__pluginbase_state__.source
def _shutdown_module(mod):
    """Best-effort teardown of *mod*: overwrite every attribute with None.

    Public (non-underscore) names are cleared in a first pass, then every
    name (including the already-cleared ones) in a second pass.
    """
    snapshot = list(mod.__dict__.items())
    # First pass: null out the public names.
    for attr_name, _ in snapshot:
        if not attr_name.startswith('_'):
            setattr(mod, attr_name, None)
    # Second pass: null out everything that was present.
    for attr_name, _ in snapshot:
        setattr(mod, attr_name, None)
def _to_bytes(s):
    """Encode text to UTF-8 bytes; pass non-text values through unchanged."""
    return s.encode('utf-8') if isinstance(s, text_type) else s
class _IntentionallyEmptyModule(ModuleType):
    """Stand-in for a plugin base package that has no real module on disk.

    Any non-dunder attribute access raises a RuntimeError telling the user
    to activate a plugin source first; dunder probes keep raising
    AttributeError so introspection machinery works normally.
    """

    def __getattr__(self, name):
        try:
            return ModuleType.__getattr__(self, name)
        except AttributeError:
            if name.startswith('__'):
                # Dunder lookups must fail the normal way.
                raise
            raise RuntimeError(
                'Attempted to import from a plugin base module (%s) without '
                'having a plugin source activated. To solve this error '
                'you have to move the import into a "with" block of the '
                'associated plugin source.' % self.__name__)
class _PluginSourceModule(ModuleType):
    """Internal module representing a single :class:`PluginSource`.

    It lives under the internal space package and exposes the source's
    search paths as its ``__path__`` so the normal package machinery can
    enumerate and import the plugins.
    """

    def __init__(self, source):
        modname = '%s.%s' % (_internalspace.__name__, source.spaceid)
        ModuleType.__init__(self, modname)
        # Back-reference (possibly weak) to the owning PluginSource.
        self.__pluginbase_state__ = PluginBaseState(source)

    @property
    def __path__(self):
        try:
            ps = self.__pluginbase_state__.source
        except AttributeError:
            # The source was cleaned up or garbage collected.
            return []
        return ps.searchpath + ps.base.searchpath
def _setup_base_package(module_name):
    """Ensure *module_name* is importable as the plugin base package.

    If the real module cannot be imported, an _IntentionallyEmptyModule is
    registered in sys.modules (and attached to its parent package, if any)
    so later plugin imports fail with a helpful error message.
    """
    try:
        mod = __import__(module_name, None, None, ['__name__'])
    except ImportError:
        mod = None
        if '.' in module_name:
            # Import the parent so the stand-in can be attached to it.
            parent_mod = __import__(module_name.rsplit('.', 1)[0],
                                    None, None, ['__name__'])
        else:
            parent_mod = None

    if mod is None:
        mod = _IntentionallyEmptyModule(module_name)
        if parent_mod is not None:
            setattr(parent_mod, module_name.rsplit('.', 1)[-1], mod)
        sys.modules[module_name] = mod
class PluginBase(object):
    """The plugin base acts as a control object around a dummy Python
    package that acts as a container for plugins.  Usually each
    application creates exactly one base object for all plugins.

    :param package: the name of the package that acts as the plugin base.
                    Usually this module does not exist.  Unless you know
                    what you are doing you should not create this module
                    on the file system.
    :param searchpath: optionally a shared search path for modules that
                       will be used by all plugin sources registered.
    """

    def __init__(self, package, searchpath=None):
        #: the name of the dummy package.
        self.package = package
        if searchpath is None:
            searchpath = []
        #: the default search path shared by all plugins as list.
        self.searchpath = searchpath
        # Register the dummy package (or an intentionally-empty stand-in)
        # in sys.modules so plugin imports can resolve.
        _setup_base_package(package)

    def make_plugin_source(self, *args, **kwargs):
        """Creates a plugin source for this plugin base and returns it.
        All parameters are forwarded to :class:`PluginSource`.
        """
        return PluginSource(self, *args, **kwargs)
class PluginSource(object):
    """The plugin source is what ultimately decides where plugins are
    loaded from.  Plugin bases can have multiple plugin sources which act
    as isolation layer.  While this is not a security system it generally
    is not possible for plugins from different sources to accidentally
    cross talk.

    Once a plugin source has been created it can be used in a ``with``
    statement to change the behavior of the ``import`` statement in the
    block to define which source to load the plugins from::

        plugin_source = plugin_base.make_plugin_source(
            searchpath=['./path/to/plugins', './path/to/more/plugins'])

        with plugin_source:
            from myapplication.plugins import my_plugin

    :param base: the base this plugin source belongs to.
    :param identifier: optionally a stable identifier.  If it's not defined
                       a random identifier is picked.  It's useful to set this
                       to a stable value to have consistent tracebacks
                       between restarts and to support pickle.
    :param searchpath: a list of paths where plugins are looked for.
    :param persist: optionally this can be set to `True` and the plugins
                    will not be cleaned up when the plugin source gets
                    garbage collected.
    """

    # Class-level defaults so that a constructor that fails part-way
    # through still leaves the destructor (__del__ -> cleanup) with sane
    # values to inspect.
    persist = False
    mod = None

    def __init__(self, base, identifier=None, searchpath=None,
                 persist=False):
        #: indicates if this plugin source persists or not.
        self.persist = persist
        if identifier is None:
            identifier = str(uuid4())
        #: the identifier for this source.
        self.identifier = identifier
        #: A reference to the plugin base that created this source.
        self.base = base
        #: a list of paths where plugins are searched in.
        self.searchpath = searchpath
        #: The internal module name of the plugin source as it appears
        #: in the :mod:`pluginsource._internalspace`.
        # (An unused local ``div = None`` was removed here.)
        self.spaceid = '_sp' + md5(
            _to_bytes(self.base.package) + _to_bytes('|') +
            _to_bytes(self.identifier)
        ).hexdigest()
        #: a reference to the module on the internal
        #: :mod:`pluginsource._internalspace`.
        self.mod = _PluginSourceModule(self)

        if hasattr(_internalspace, self.spaceid):
            raise RuntimeError('This plugin source already exists.')
        sys.modules[self.mod.__name__] = self.mod
        setattr(_internalspace, self.spaceid, self.mod)

    def __del__(self):
        if not self.persist:
            self.cleanup()

    def list_plugins(self):
        """Returns a sorted list of all plugins that are available in this
        plugin source.  This can be useful to automatically discover plugins
        that are available and is usually used together with
        :meth:`load_plugin`.
        """
        rv = []
        for _, modname, ispkg in pkgutil.iter_modules(self.mod.__path__):
            rv.append(modname)
        return sorted(rv)

    def load_plugin(self, name):
        """This automatically loads a plugin by the given name from the
        current source and returns the module.  This is a convenient
        alternative to the import statement and saves you from invoking
        ``__import__`` or a similar function yourself.

        :param name: the name of the plugin to load.
        :raises ImportError: if the name contains a dot or the plugin
                             cannot be imported.
        """
        if '.' in name:
            raise ImportError('Plugin names cannot contain dots.')
        # Equivalent to performing the import inside ``with self:`` but
        # kept inline (original inlined form preserved below):
        #with self:
        #    return __import__(self.base.package + '.' + name,
        #                      globals(), {}, ['__name__'])
        self.__assert_not_cleaned_up()
        _local.__dict__.setdefault('space_stack', []).append(self)
        try:
            res = __import__(self.base.package + '.' + name,
                             globals(), {}, ['__name__'])
            return res
        finally:
            try:
                _local.space_stack.pop()
            except (AttributeError, IndexError):
                pass

    def open_resource(self, plugin, filename):
        """This function locates a resource inside the plugin and returns
        a byte stream to the contents of it.  If the resource cannot be
        loaded an :exc:`IOError` will be raised.  Only plugins that are
        real Python packages can contain resources.  Plain old Python
        modules do not allow this for obvious reasons.

        .. versionadded:: 0.3

        :param plugin: the name of the plugin to open the resource of.
        :param filename: the name of the file within the plugin to open.
        """
        mod = self.load_plugin(plugin)
        fn = getattr(mod, '__file__', None)
        if fn is not None:
            if fn.endswith(('.pyc', '.pyo')):
                # Map a compiled file back to its source next to it.
                fn = fn[:-1]
            if os.path.isfile(fn):
                return open(os.path.join(os.path.dirname(fn), filename), 'rb')
        buf = pkgutil.get_data(self.mod.__name__ + '.' + plugin, filename)
        if buf is None:
            # BUG FIX: this previously read ``errno.ENOEXITS``, which does
            # not exist in the errno module, so the lookup itself raised
            # AttributeError instead of the documented IOError.  ENOENT
            # ("no such file or directory") is the intended error number.
            raise IOError(errno.ENOENT, 'Could not find resource')
        return NativeBytesIO(buf)

    def cleanup(self):
        """Cleans up all loaded plugins manually.  This is necessary to
        call only if :attr:`persist` is enabled.  Otherwise this happens
        automatically when the source gets garbage collected.
        """
        self.__cleanup()

    def __cleanup(self, _sys=sys, _shutdown_module=_shutdown_module):
        # The default parameters are necessary because this can be fired
        # from the destructor and so late when the interpreter shuts down
        # that these functions and modules might be gone.
        if self.mod is None:
            return
        modname = self.mod.__name__
        self.mod.__pluginbase_state__ = None
        self.mod = None
        try:
            delattr(_internalspace, self.spaceid)
        except AttributeError:
            pass
        prefix = modname + '.'
        _sys.modules.pop(modname)
        # Drop every loaded plugin module and neutralize its globals so
        # stale references fail fast instead of operating on dead state.
        for key, value in list(_sys.modules.items()):
            if not key.startswith(prefix):
                continue
            mod = _sys.modules.pop(key, None)
            if mod is None:
                continue
            _shutdown_module(mod)

    def __assert_not_cleaned_up(self):
        if self.mod is None:
            raise RuntimeError('The plugin source was already cleaned up.')

    def __enter__(self):
        # Activate this source for imports in the current thread.
        self.__assert_not_cleaned_up()
        _local.__dict__.setdefault('space_stack', []).append(self)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        try:
            _local.space_stack.pop()
        except (AttributeError, IndexError):
            pass

    def _rewrite_module_path(self, modname):
        # Translate a "virtual" plugin-base module name into the real
        # internal-space module name; returns None for unrelated names.
        self.__assert_not_cleaned_up()
        if modname == self.base.package:
            return self.mod.__name__
        elif modname.startswith(self.base.package + '.'):
            pieces = modname.split('.')
            return self.mod.__name__ + '.' + '.'.join(
                pieces[self.base.package.count('.') + 1:])
class PluginBaseState(object):
    """Holds the back-reference from an internal module to its source.

    Non-persistent sources are held only through a weak reference so they
    can be garbage collected (triggering plugin cleanup); persistent ones
    are kept alive through a closure.
    """

    __slots__ = ('_source',)

    def __init__(self, source):
        if not source.persist:
            self._source = weakref(source)
        else:
            # Strong reference via closure: keeps the source alive.
            self._source = lambda: source

    @property
    def source(self):
        plugin_source = self._source()
        if plugin_source is None:
            raise AttributeError('Plugin source went away')
        return plugin_source
| apache-2.0 |
natanlailari/PennApps2015-Heartmates | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py | 550 | 8977 | # urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example: ::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2

    """

    # Overridden by ProxyManager; when set, urlopen() sends plain-HTTP
    # requests with the absolute URL as required by proxies.
    proxy = None

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU cache of pools; evicted pools are closed via dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

    def _new_pool(self, scheme, host, port):
        """
        Create a new :class:`ConnectionPool` based on host, port and scheme.

        This method is used to actually create the connection pools handed out
        by :meth:`connection_from_url` and companion methods. It is intended
        to be overridden for customization.
        """
        pool_cls = pool_classes_by_scheme[scheme]
        kwargs = self.connection_pool_kw
        if scheme == 'http':
            # SSL-only keyword arguments would be rejected by the plain
            # HTTP pool constructor, so strip them from a copy.
            kwargs = self.connection_pool_kw.copy()
            for kw in SSL_KEYWORDS:
                kwargs.pop(kw, None)

        return pool_cls(host, port, **kwargs)

    def clear(self):
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http'):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``.
        """
        scheme = scheme or 'http'
        port = port or port_by_scheme.get(scheme, 80)

        pool_key = (scheme, host, port)

        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool

            # Make a fresh ConnectionPool of the desired type
            pool = self._new_pool(scheme, host, port)
            self.pools[pool_key] = pool
        return pool

    def connection_from_url(self, url):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url` but
        doesn't pass any additional parameters to the
        :class:`urllib3.connectionpool.ConnectionPool` constructor.

        Additional parameters are taken from the :class:`.PoolManager`
        constructor.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

        # Redirects are handled here (cross-host aware), not in the pool.
        kw['assert_same_host'] = False
        kw['redirect'] = False
        if 'headers' not in kw:
            kw['headers'] = self.headers

        if self.proxy is not None and u.scheme == "http":
            # Proxied plain-HTTP requests must carry the absolute URL.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)

        # RFC 2616, Section 10.3.4
        if response.status == 303:
            method = 'GET'

        log.info("Redirecting %s -> %s" % (url, redirect_location))
        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
        kw['redirect'] = redirect
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3

    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):
        if isinstance(proxy_url, HTTPConnectionPool):
            # Allow passing an existing pool object; rebuild its URL.
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Fall back to the scheme's well-known port (default 80).
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}

        assert self.proxy.scheme in ("http", "https"), \
            'Not supported proxy scheme %s' % self.proxy.scheme

        # These are picked up by the pools created in PoolManager._new_pool.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers

        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http'):
        # HTTPS uses a pool keyed by the target host (CONNECT tunnel);
        # plain HTTP always goes through a pool keyed by the proxy itself.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme)

        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}

        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc

        if headers:
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)

        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
                                                                self.headers))

        return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
    """Shortcut: build a :class:`ProxyManager` for the proxy at *url*."""
    return ProxyManager(proxy_url=url, **kw)
| apache-2.0 |
uwafsl/MissionPlanner | Lib/site-packages/numpy/fft/info.py | 85 | 6667 | """
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
fftn Discrete Fourier transform in N-dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the signal from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(A)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the normalization by :math:`1/n`.
Real and Hermitian transforms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points. The inverses of this family assumes the same symmetry of
its input, and for an output of ``n`` points uses ``n/2+1`` input points.
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
References
^^^^^^^^^^
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
^^^^^^^^
For examples, see the various functions.
"""
depends = ['core']
| gpl-3.0 |
mibanescu/pulp | server/pulp/server/event/mail.py | 16 | 2423 | import logging
import smtplib
import threading
try:
from email.mime.text import MIMEText
except ImportError:
# python 2.4 version
from email.MIMEText import MIMEText
from pulp.server.compat import json, json_util
from pulp.server.config import config
TYPE_ID = 'email'
_logger = logging.getLogger(__name__)
def handle_event(notifier_config, event):
    """
    If email is enabled in the server settings, sends an email to each recipient
    listed in the notifier_config.

    :param notifier_config: dictionary with keys 'subject', which defines the
                            subject of each email message, and 'addresses',
                            which is a list of strings that are email addresses
                            that should receive this notification.
    :type notifier_config: dict
    :param event: Event instance
    :type event: pulp.server.event.data.event

    :return: None
    """
    if not config.getboolean('email', 'enabled'):
        return
    # Serialize the event payload as pretty-printed JSON for the body.
    body = json.dumps(event.data(), indent=2, default=json_util.default)
    subject = notifier_config['subject']
    addresses = notifier_config['addresses']

    for address in addresses:
        # One daemon thread per recipient so a slow SMTP server never
        # blocks the caller; daemon=True means sends still in flight at
        # interpreter exit may be dropped.
        thread = threading.Thread(target=_send_email, args=(subject, body, address))
        thread.daemon = True
        thread.start()
def _send_email(subject, body, to_address):
    """
    Send a text email to one recipient

    :param subject: email subject
    :type  subject: basestring
    :param body: text body of the email
    :type  body: basestring
    :param to_address: email address to send to
    :type  to_address: basestring

    :return: None
    """
    host = config.get('email', 'host')
    port = config.getint('email', 'port')
    from_address = config.get('email', 'from')

    message = MIMEText(body)
    message['Subject'] = subject
    message['From'] = from_address
    message['To'] = to_address

    try:
        connection = smtplib.SMTP(host=host, port=port)
    except smtplib.SMTPConnectError:
        _logger.exception('SMTP connection failed to %s on %s' % (host, port))
        return
    try:
        connection.sendmail(from_address, to_address, message.as_string())
    except smtplib.SMTPException:
        # The logger itself can be partially torn down (e.g. during
        # interpreter shutdown); fall back to a plain error record.
        try:
            _logger.exception('Error sending mail.')
        except AttributeError:
            _logger.error('SMTP error while sending mail')
    # NOTE(review): if sendmail raises something other than SMTPException,
    # quit() is skipped and the connection leaks - confirm that is intended.
    connection.quit()
| gpl-2.0 |
Kore-Core/kore | qa/rpc-tests/pruning.py | 1 | 16563 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Kore Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import KoreTestFramework
from test_framework.util import *
def calc_usage(blockdir):
    """Return the combined size, in MiB, of the plain files directly inside
    *blockdir* (which must end with a path separator)."""
    total_bytes = 0.0
    for entry in os.listdir(blockdir):
        path = blockdir + entry
        if os.path.isfile(path):
            total_bytes += os.path.getsize(path)
    return total_bytes / (1024. * 1024.)
class PruneTest(KoreTestFramework):
    def __init__(self):
        # NOTE(review): does not call super().__init__(); presumably
        # KoreTestFramework performs its own setup elsewhere - confirm.
        # UTXOs cached for building oversized transactions.
        self.utxo = []
        # Receive addresses for nodes 0 and 1 (set in setup_network).
        self.address = ["",""]
        # Pre-generated large txouts, presumably used to bloat blocks to
        # maximum size in mine_full_block - confirm against the helper.
        self.txouts = gen_return_txouts()
    def setup_chain(self):
        """Initialize a clean 3-node chain in the test's temp directory."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)
    def setup_network(self):
        """Start two mining nodes and one pruning node, connected in a ring."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        # Directory whose on-disk size is monitored throughout the test.
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        self.address[0] = self.nodes[0].getnewaddress()
        self.address[1] = self.nodes[1].getnewaddress()

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Ring topology 0-1-2-0 so blocks propagate to the pruning node.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        sync_blocks(self.nodes[0:3])
    def create_big_chain(self):
        """Mine enough full blocks that node 2 exceeds its 550MB prune target."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        for i in range(645):
            self.mine_full_block(self.nodes[0], self.address[0])

        sync_blocks(self.nodes[0:3])
    def test_height_min(self):
        """Check the first block file survives until enough height accumulates,
        then gets pruned within the usage target."""
        if not os.path.isfile(self.prunedir+"blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        print("Success")
        print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
        print("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in range(25):
            self.mine_full_block(self.nodes[0],self.address[0])

        # Poll for the file's deletion, with a 10-second timeout.
        waitstart = time.time()
        while os.path.isfile(self.prunedir+"blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 10:
                raise AssertionError("blk00000.dat not pruned when it should be")

        print("Success")
        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")
    def create_chain_with_staleblocks(self):
        """Repeatedly fork the chain so the pruning node stores stale blocks."""
        # Create stale blocks in manageable sized chunks
        print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0],0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            for i in range(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1],self.address[1])
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in range(25):
                self.mine_full_block(self.nodes[0],self.address[0])

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
    def reorg_test(self):
        """Force a deep (287-block) reorg and check the prune target holds.

        Node 1 invalidates back to 287 blocks before the common tip, mines a
        new 300-block chain, and after reconnecting nodes 0 and 2 must reorg
        onto it (requiring 288 blocks of undo data).  220 more blocks are then
        mined so pruning of the old chain's files can trigger.

        Returns:
            (invalidheight, badhash): height and hash of the first block of
            the abandoned fork, used later by reorg_back().
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
        height = self.nodes[1].getblockcount()
        print("Current block height:", height)
        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print("Invalidating block at height:",invalidheight,badhash)
        self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)
        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print("New best height", self.nodes[1].getblockcount())
        # Reboot node1 to clear those giant tx's from mempool
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
        print("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)
        print("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3])
        print("Verify height on node 2:",self.nodes[2].getblockcount())
        print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
        print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
        self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3])
        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")
        return invalidheight,badhash
    def reorg_back(self):
        """Reorg node 2 back to the original main chain, redownloading blocks.

        First confirms the old fork block (self.forkhash) was pruned on node 2,
        then invalidates the reorg_test chain so node 2 must reorg back to the
        original chain, redownloading the pruned blocks from node 0.
        """
        # Verify that a block on the old main chain fork has been pruned away
        try:
            self.nodes[2].getblock(self.forkhash)
            # getblock succeeding means the block is still on disk: test setup failed.
            raise AssertionError("Old block wasn't pruned so can't test redownload")
        except JSONRPCException as e:
            print("Will need to redownload block",self.forkheight)
        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large block are in the block files after it,
        # its expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2
        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer thats on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1
        print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Poll until node 2 reaches the goal height; fail after 900s.
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 900:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in range(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - 100*self.relayfee # Fee must be above min relay rate for 66kb tx
outputs[address]=remchange
# Create a basic transaction that will send change back to ourself after account for a fee
# And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
    def run_test(self):
        """Run the full pruning scenario: big chain, stale forks, deep reorg,
        and redownload of previously pruned blocks.

        The chain-diagram comments below track each node's view of the chain
        as the test progresses.
        """
        print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
        print("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)
        print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)
        print("Check that we'll exceed disk space target if we have a very high stale block rate")
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044) &..                    $...$(1319)
        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()   #1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
        print("Check that we can survive a 288 block reorg still")
        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                  \               \
        #                   \               X...
        #                    \               \
        #                     ++...+(1031)X..   ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \               \
        #                   \               X...
        #                    \               \
        #                     ++...+(1031)X..   ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \               \
        #                   \               *...**(1320)
        #                    \               \
        #                     ++...++(1044)   ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)
        print("Test that we can rerequest a block we previously pruned if needed for a reorg")
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                  \               \
        #                   \               X@...@@@(1552)
        #                    \
        #                     +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid
        print("Done")
# Script entry point: run the pruning test end-to-end.
if __name__ == '__main__':
    PruneTest().main()
| mit |
chankeypathak/pandas-matplotlib-examples | Lesson 2/pandas_matplot_txt.py | 1 | 1307 | import pandas as pd
import matplotlib.pyplot as plt
from numpy import random
import os
# The inital set of baby names
names = ['Bob','Jessica','Mary','John','Mel']
# This will ensure the random samples below can be reproduced.
# This means the random samples will always be identical.
random.seed(500)
random_names = [names[random.randint(low=0,high=len(names))] for i in range(1000)]
# The number of births per name for the year 1880
births = [random.randint(low=0,high=1000) for i in range(1000)]
BabyDataSet = list(zip(random_names,births))
#print BabyDataSet[:10]
df = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births'])
df.to_csv('births1880.txt',index=False,header=False)
Location = 'births1880.txt'
df = pd.read_csv(Location, names=['Names','Births'])
#metadata
#print df.info()
#See first five records using head
#print df.head()
#See last five records using tail
#print df.tail()
os.remove(Location)
#df['Names'].unique()
#print(df['Names'].describe())
# Create a groupby object
name = df.groupby('Names')
# Apply the sum function to the groupby object
df = name.sum()
Sorted = df.sort_values(['Births'], ascending=False)
#print Sorted.head(1)
# Create graph
df['Births'].plot.bar()
print("The most popular name")
df.sort_values(by='Births', ascending=False)
plt.show()
| mit |
lamby/live-studio | contrib/django/contrib/sitemaps/views.py | 4 | 1969 | from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.http import Http404
from django.template.response import TemplateResponse
from django.contrib.sites.models import get_current_site
def index(request, sitemaps,
          template_name='sitemap_index.xml', mimetype='application/xml'):
    """Render a sitemap index listing the URL of every section's sitemap.

    Sections whose sitemap spans multiple pages get one entry per page
    (``?p=N`` query parameter for pages 2 and up).
    """
    current_site = get_current_site(request)
    sites = []
    # Idiom fix: was the fragile `cond and a or b` pattern.
    protocol = 'https' if request.is_secure() else 'http'
    for section, site in sitemaps.items():
        site.request = request
        if callable(site):
            pages = site().paginator.num_pages
        else:
            pages = site.paginator.num_pages
        sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap', kwargs={'section': section})
        sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
        if pages > 1:
            for page in range(2, pages + 1):
                sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
    return TemplateResponse(request, template_name, {'sitemaps': sites}, mimetype=mimetype)
def sitemap(request, sitemaps, section=None,
            template_name='sitemap.xml', mimetype='application/xml'):
    """Render the sitemap for one named section, or for all sections.

    Raises Http404 for an unknown section, an empty page, or a
    non-integer ``p`` query parameter.
    """
    if section is None:
        maps = sitemaps.values()
    else:
        if section not in sitemaps:
            raise Http404("No sitemap available for section: %r" % section)
        maps = [sitemaps[section]]
    page = request.GET.get("p", 1)
    current_site = get_current_site(request)
    urls = []
    for site in maps:
        try:
            # A sitemap may be registered as a class; instantiate it lazily.
            if callable(site):
                site = site()
            urls.extend(site.get_urls(page=page, site=current_site))
        except EmptyPage:
            raise Http404("Page %s empty" % page)
        except PageNotAnInteger:
            raise Http404("No page '%s'" % page)
    return TemplateResponse(request, template_name, {'urlset': urls}, mimetype=mimetype)
brchiu/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 17 | 5198 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
    csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
  """Trains, evaluates, and exports a multivariate model.

  Args:
    csv_file_name: CSV file with a time column followed by five value columns.
    export_directory: Directory for the SavedModel export; a temporary
      directory is created when None.
    training_steps: Number of estimator training steps.

  Returns:
    A (times, observations) pair of numpy arrays covering both the observed
    data and 100 sampled continuation steps.
  """
  estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=[], num_features=5)
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      # Larger window sizes generally produce a better covariance matrix.
      reader, batch_size=4, window_size=64)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  # Evaluation over the whole dataset also yields the model state used to
  # seed iterative prediction below.
  current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  values = [current_state["observed"]]
  times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
  # Export the model so we can do iterative prediction and filtering without
  # reloading model checkpoints.
  if export_directory is None:
    export_directory = tempfile.mkdtemp()
  input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
  export_location = estimator.export_saved_model(export_directory,
                                                 input_receiver_fn)
  with tf.Graph().as_default():
    numpy.random.seed(1)  # Make the example a bit more deterministic
    with tf.Session() as session:
      signatures = tf.saved_model.loader.load(
          session, [tf.saved_model.tag_constants.SERVING], export_location)
      # Alternate predicting one step ahead and filtering in a value sampled
      # from that prediction, for 100 steps.
      for _ in range(100):
        current_prediction = (
            tf.contrib.timeseries.saved_model_utils.predict_continuation(
                continue_from=current_state, signatures=signatures,
                session=session, steps=1))
        next_sample = numpy.random.multivariate_normal(
            # Squeeze out the batch and series length dimensions (both 1).
            mean=numpy.squeeze(current_prediction["mean"], axis=(0, 1)),
            cov=numpy.squeeze(current_prediction["covariance"], axis=(0, 1)))
        # Update model state so that future predictions are conditional on the
        # value we just sampled.
        filtering_features = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
                tf.contrib.timeseries.FilteringResults.TIMES],
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
                None, None, :]}
        current_state = (
            tf.contrib.timeseries.saved_model_utils.filter_continuation(
                continue_from=current_state,
                session=session,
                signatures=signatures,
                features=filtering_features))
        values.append(next_sample[None, None, :])
        times.append(current_state["times"])
  all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
  all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
  return all_times, all_observations
def main(unused_argv):
  """Train the multivariate model, sample continuations, and plot them."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  times, observations = multivariate_train_and_sample()
  # A dotted vertical line marks where observed data ends and sampling begins.
  pyplot.axvline(1000, linestyle="dotted")
  pyplot.plot(times, observations)
  pyplot.show()
# tf.app.run parses command-line flags and then invokes main().
if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
tpokorra/pykolab | wallace.py | 1 | 1170 | #!/usr/bin/python
#
# Copyright 2010-2013 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys

# For development purposes: allow resolving pykolab from a source checkout
# in the current or parent directory.
sys.path.extend(['.', '..'])

from pykolab.translate import _

try:
    from pykolab.constants import *
except ImportError, e:
    # Python 2-only `except ... , e` syntax: this entry point targets Python 2.
    print >> sys.stderr, _("Cannot load pykolab/constants.py:")
    print >> sys.stderr, "%s" % e
    sys.exit(1)

import wallace

if __name__ == "__main__":
    # NOTE(review): rebinding `wallace` shadows the module imported above;
    # harmless here since the module is not used again, but confusing.
    wallace = wallace.WallaceDaemon()
    wallace.run()
Beercow/viper | viper/modules/pymacho/MachODYSymtabCommand.py | 6 | 4187 | # encoding: utf-8
"""
Copyright 2013 Jérémie BOUTOILLE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from struct import unpack, pack
from viper.modules.pymacho.MachOLoadCommand import MachOLoadCommand
from viper.modules.pymacho.Utils import green
class MachODYSymtabCommand(MachOLoadCommand):
    """LC_DYSYMTAB load command: dynamic-linker symbol table information.

    Holds nine (offset/index, count) pairs read from and written back to a
    Mach-O file as 18 consecutive little-endian 32-bit unsigned integers.
    Python 2 only (uses print statements).
    """

    # Index into / number of entries in the local symbol group.
    ilocalsym = 0
    nlocalsym = 0
    # Externally defined symbol group.
    iextdefsym = 0
    nextdefsym = 0
    # Undefined symbol group.
    iundefsym = 0
    nundefsym = 0
    # Table of contents offset and entry count.
    tocoff = 0
    ntoc = 0
    # Module table offset and entry count.
    modtaboff = 0
    nmodtab = 0
    # Referenced symbol table offset and entry count.
    extrefsymoff = 0
    nextrefsym = 0
    # Indirect symbol table offset and entry count.
    indirectsymoff = 0
    nindirectsyms = 0
    # External and local relocation entries (offset and count each).
    extreloff = 0
    nextrel = 0
    locreloff = 0
    nlocrel = 0

    def __init__(self, macho_file=None, cmd=0):
        # cmd is the load-command identifier; when a file object is given,
        # the command payload is parsed immediately from its current offset.
        self.cmd = cmd
        if macho_file is not None:
            self.parse(macho_file)

    def parse(self, macho_file):
        """Read the 18 little-endian uint32 fields from macho_file."""
        self.ilocalsym, self.nlocalsym = unpack('<II', macho_file.read(4*2))
        self.iextdefsym, self.nextdefsym = unpack('<II', macho_file.read(4*2))
        self.iundefsym, self.nundefsym = unpack('<II', macho_file.read(4*2))
        self.tocoff, self.ntoc = unpack('<II', macho_file.read(4*2))
        self.modtaboff, self.nmodtab = unpack('<II', macho_file.read(4*2))
        self.extrefsymoff, self.nextrefsym = unpack('<II', macho_file.read(4*2))
        self.indirectsymoff, self.nindirectsyms = unpack('<II', macho_file.read(4*2))
        self.extreloff, self.nextrel = unpack('<II', macho_file.read(4*2))
        self.locreloff, self.nlocrel = unpack('<II', macho_file.read(4*2))

    def write(self, macho_file):
        """Serialize the command to macho_file, back-patching cmdsize.

        The size field is written as 0 first, then overwritten with the
        actual byte count once the payload length is known.
        """
        before = macho_file.tell()
        macho_file.write(pack('<II', self.cmd, 0x0))
        macho_file.write(pack('<II', self.ilocalsym, self.nlocalsym))
        macho_file.write(pack('<II', self.iextdefsym, self.nextdefsym))
        macho_file.write(pack('<II', self.iundefsym, self.nundefsym))
        macho_file.write(pack('<II', self.tocoff, self.ntoc))
        macho_file.write(pack('<II', self.modtaboff, self.nmodtab))
        macho_file.write(pack('<II', self.extrefsymoff, self.nextrefsym))
        macho_file.write(pack('<II', self.indirectsymoff, self.nindirectsyms))
        macho_file.write(pack('<II', self.extreloff, self.nextrel))
        macho_file.write(pack('<II', self.locreloff, self.nlocrel))
        after = macho_file.tell()
        # Seek back to the cmdsize slot (4 bytes past the command start).
        macho_file.seek(before+4)
        macho_file.write(pack('<I', after-before))
        macho_file.seek(after)

    def display(self, before=''):
        """Pretty-print every field, prefixed by the string `before`."""
        print before + green("[+]")+" LC_DYSYMTAB"
        print before + "\t- ilocalsym : 0x%x" % self.ilocalsym
        print before + "\t- nlocalsym : 0x%x" % self.nlocalsym
        print before + "\t- iextdefsym : 0x%x" % self.iextdefsym
        print before + "\t- nextdefsym : 0x%x" % self.nextdefsym
        print before + "\t- iundefsym : 0x%x" % self.iundefsym
        print before + "\t- nundefsym : 0x%x" % self.nundefsym
        print before + "\t- tocoff : 0x%x" % self.tocoff
        print before + "\t- ntoc : %d" % self.ntoc
        print before + "\t- modtaboff : 0x%x" % self.modtaboff
        print before + "\t- nmodtab : 0x%x" % self.nmodtab
        print before + "\t- extrefsymoff : 0x%x" % self.extrefsymoff
        print before + "\t- nextrefsym : 0x%x" % self.nextrefsym
        print before + "\t- indirectsymoff : 0x%x" % self.indirectsymoff
        print before + "\t- nindirectsyms : 0x%x" % self.nindirectsyms
        print before + "\t- extreloff : 0x%x" % self.extreloff
        print before + "\t- nextrel : 0x%x" % self.nextrel
        print before + "\t- locreloff : 0x%x" % self.locreloff
        print before + "\t- nlocrel : 0x%x" % self.nlocrel
| bsd-3-clause |
abreis/swift-gissumo | scripts/templates/01simulateParallel.py | 1 | 5817 | #!/usr/bin/env python3
# This script runs multiple GISSUMO simulations in parallel, one for each floating car data file provided.
import gzip
import os
import plistlib
import re
import shutil
import subprocess
import sys
import time
import datetime
# Requires Python >3.5
assert sys.version_info >= (3,5), "This script requires Python 3.5 or later."
# Number of simulations to run concurrently (one GIS database per worker).
maxThreads = 5
simulationDir = "simulations"
simulationDescription = "description.txt"
floatingCarDataDir = "fcddata"

# Preconditions: input data, reference config and obstruction mask must
# exist; a previous results folder is only wiped with --overwrite.
if not os.path.isdir(floatingCarDataDir):
    print("Error: No floating car data directory.")
    sys.exit(1)
if os.path.isdir(simulationDir):
    if "--overwrite" in sys.argv:
        shutil.rmtree(simulationDir)
    else:
        print("Error: Folder with previous simulations exists, move it before proceeding.")
        print("Specify --overwrite on the command line to clear folder.")
        sys.exit(1)
if not os.path.isfile('config.plist'):
    print("Error: Please provide a reference configuration file.")
    sys.exit(1)
if not os.path.isfile('obstructionMask.payload'):
    print("Error: Please generate and provide an obstruction mask file.")
    sys.exit(1)

# Pull the latest binary
shutil.copy('../../build/gissumo_fast','./')

# Create the simulation directory
os.makedirs(simulationDir, exist_ok=True)

# Collect every '*fcd.tsv' floating car data file (recursively).
fcdFiles = []
for dirpath, dirnames, filenames in os.walk(floatingCarDataDir):
    for file in filenames:
        if file.endswith('fcd.tsv'):
            fcdFiles.append( os.path.join(dirpath, file) )

# Worker array: each worker can be 'free' or 'busy'
workers = ['free'] * maxThreads
# Holds Popen handles for each worker
workerHandles = [None] * maxThreads
# Holds the start time of each worker, for statistics
workerStartTimes = [None] * maxThreads
# Total number of simulations
totalSimulations = len(fcdFiles)
# Array to store simulation times, for statistics
simulationTimes = []
def gunzip(fileIn):
    """Decompress *fileIn* akin to 'gzip -d', erasing the original .gz.

    Returns the name (and path, if provided) of the decompressed file.
    """
    # Like gunzip, default output has the same base name
    fileOut = re.sub(r'\.gz$', '', fileIn)
    # Stream the decompression instead of loading the whole file into
    # memory (FCD files can be large).
    with gzip.open(fileIn, 'rb') as inFileGzipHandle:
        with open(fileOut, 'wb') as outFileGzipHandle:
            shutil.copyfileobj(inFileGzipHandle, outFileGzipHandle)
    # Wipe compressed file
    os.remove(fileIn)
    return fileOut
# Routine to create a new simulation
def simulate(fcdFileIn):
    """Launch one simulation for *fcdFileIn* on the first free worker.

    Creates a per-simulation directory, copies the FCD file and the
    reference config into it, points the config at this simulation's data,
    stats folder and per-worker GIS database, then starts gissumo_fast in
    the background.  Mutates the module-level worker bookkeeping arrays.
    """
    simulationName = re.sub('\.fcd.tsv$', '', os.path.basename(fcdFileIn))
    # Find a free worker and mark it busy
    freeWorkerId = workers.index('free')
    workers[freeWorkerId] = 'busy'
    # Create the base simulation directory
    os.makedirs(os.path.join(simulationDir, simulationName), exist_ok=True)
    # Copy the FCD file over
    fcdFile = os.path.join(simulationDir, simulationName, os.path.basename(fcdFileIn))
    shutil.copyfile(fcdFileIn, fcdFile)
    # Uncompress the FCD file
    #fcdFile = gunzip(fcdFile)
    # Copy the reference configuration file over
    configFile = os.path.join(simulationDir, simulationName, 'config.plist')
    shutil.copyfile('config.plist', configFile)
    # Import and edit the configuration
    with open(configFile, 'rb') as configFileHandle:
        configFileDict = plistlib.load(configFileHandle, fmt=plistlib.FMT_XML)
    # Edit 'floatingCarDataFile' and 'statsFolder' on the configuration
    configFileDict['floatingCarDataFile'] = fcdFile
    configFileDict['stats']['statsFolder'] = os.path.join(simulationDir, simulationName, 'stats')
    # Set 'gis.database' to match the free worker id
    configFileDict['gis']['database'] = 'gisdb{:d}'.format(freeWorkerId)
    # Write to the configuration file and close
    with open(configFile, 'wb') as configFileHandle:
        plistlib.dump(configFileDict, configFileHandle, fmt=plistlib.FMT_XML)
    # Record start time and launch the simulator in the background,
    # redirecting all output to a per-simulation log file.
    workerStartTimes[freeWorkerId] = time.time()
    runString ="./gissumo_fast {:s} > {:s} 2>&1".format(configFile, os.path.join(simulationDir, simulationName, 'gissumo.log'))
    workerHandles[freeWorkerId] = subprocess.Popen(runString, shell=True)
# Main scheduling loop: poll workers once per second, reap finished
# simulations, and dispatch new ones while FCD files remain.
simulationCount = 0
while True:
    # Update worker statuses
    for workerId, worker in enumerate(workers):
        if worker == 'busy':
            # poll() returns the exit code once the process has finished.
            if workerHandles[workerId].poll() != None:
                # Worker has finished
                workers[workerId] = 'free'
                # Save simulation time
                simulationTimes.append(time.time() - workerStartTimes[workerId])
                # Update simulation count
                simulationCount += 1
                # Print some statistics if a simulation finished; the mean is
                # divided by maxThreads since that many run concurrently.
                meanSimulationTime = sum(simulationTimes)/len(simulationTimes)/maxThreads
                remainingTime = meanSimulationTime*(totalSimulations-simulationCount)
                print("{:s} {:d}/{:d} simulations complete, ETA {:d}h{:02d}m{:02d}s".format(str(datetime.datetime.now().time()), simulationCount, totalSimulations, int(remainingTime/3600), int(remainingTime%3600/60), int(remainingTime%60)), flush=True)
    # Run a simulation if a free worker is available
    if (len(fcdFiles) > 0) and (workers.count('free') > 0):
        # Pull a new simulation file
        newFcdFile = fcdFiles.pop(0)
        # Simulate it
        simulate(newFcdFile)
    # Iterate until no simulations remain, and no workers still busy
    if (len(fcdFiles) == 0) and (workers.count('busy') == 0):
        break
    time.sleep(1)

# Create a file with a description of the simulation set (overwriting)
with open(os.path.join(simulationDir, simulationDescription), 'w') as descriptionFp:
    descriptionFp.write("simulations: {:d}\n".format(totalSimulations))

# Simulation over
print("Set complete, ran {:d} simulations.".format(totalSimulations))

# Remove FCD files and simulation timetrackers in the simulation dir
for dirpath, dirnames, filenames in os.walk(simulationDir):
    for file in filenames:
        if file.endswith('fcd.tsv') or file=='simulationTime.log':
            os.remove(os.path.join(dirpath, file))

# Clean up the copied simulator binary.
os.remove('gissumo_fast')
| mit |
camny125/grit-i18n | grit/format/policy_templates/writers/adml_writer_unittest.py | 41 | 16180 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for grit.format.policy_templates.writers.adml_writer."""
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from grit.format.policy_templates.writers import adml_writer
from grit.format.policy_templates.writers import xml_writer_base_unittest
class AdmlWriterUnittest(xml_writer_base_unittest.XmlWriterBaseTest):
  def setUp(self):
    """Create an ADML writer with a minimal config and message catalog."""
    config = {
      'app_name': 'test',
      'build': 'test',
      'win_supported_os': 'SUPPORTED_TESTOS',
    }
    self.writer = adml_writer.GetWriter(config)
    # Messages the writer looks up when emitting the string table.
    self.writer.messages = {
      'win_supported_winxpsp2': {
        'text': 'Supported on Test OS or higher',
        'desc': 'blah'
      },
      'doc_recommended': {
        'text': 'Recommended',
        'desc': 'bleh'
      },
    }
    self.writer.Init()
  def _InitWriterForAddingPolicyGroups(self, writer):
    '''Initialize the writer for adding policy groups. This method must be
    called before the method "BeginPolicyGroup" can be called. It initializes
    attributes of the writer.
    '''
    writer.BeginTemplate()
  def _InitWriterForAddingPolicies(self, writer, policy):
    '''Initialize the writer for adding policies. This method must be
    called before the method "WritePolicy" can be called. It initializes
    attributes of the writer, wraps |policy| in a stub policy group, and
    clears the string table so tests only see strings for |policy|.
    '''
    self._InitWriterForAddingPolicyGroups(writer)
    policy_group = {
      'name': 'PolicyGroup',
      'caption': 'Test Caption',
      'desc': 'This is the test description of the test policy group.',
      'policies': policy,
    }
    writer.BeginPolicyGroup(policy_group)

    # Remove the pre-populated <string> elements so assertions below only
    # compare against strings produced for the policy under test.
    string_elements = \
        self.writer._string_table_elem.getElementsByTagName('string')
    for elem in string_elements:
      self.writer._string_table_elem.removeChild(elem)
def testEmpty(self):
self.writer.BeginTemplate()
self.writer.EndTemplate()
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?><policyDefinitionResources'
' revision="1.0" schemaVersion="1.0"><displayName/><description/>'
'<resources><stringTable><string id="SUPPORTED_TESTOS">Supported on'
' Test OS or higher</string></stringTable><presentationTable/>'
'</resources></policyDefinitionResources>')
self.AssertXMLEquals(output, expected_output)
  def testVersionAnnotation(self):
    """A configured version is emitted as an XML comment in the template."""
    self.writer.config['version'] = '39.0.0.0'
    self.writer.BeginTemplate()
    self.writer.EndTemplate()
    output = self.writer.GetTemplateText()
    expected_output = (
        '<?xml version="1.0" ?><policyDefinitionResources'
        ' revision="1.0" schemaVersion="1.0"><!--test version: 39.0.0.0-->'
        '<displayName/><description/><resources><stringTable>'
        '<string id="SUPPORTED_TESTOS">Supported on'
        ' Test OS or higher</string></stringTable><presentationTable/>'
        '</resources></policyDefinitionResources>')
    self.AssertXMLEquals(output, expected_output)
def testPolicyGroup(self):
empty_policy_group = {
'name': 'PolicyGroup',
'caption': 'Test Group Caption',
'desc': 'This is the test description of the test policy group.',
'policies': [
{'name': 'PolicyStub2',
'type': 'main'},
{'name': 'PolicyStub1',
'type': 'main'},
],
}
self._InitWriterForAddingPolicyGroups(self.writer)
self.writer.BeginPolicyGroup(empty_policy_group)
self.writer.EndPolicyGroup
# Assert generated string elements.
output = self.GetXMLOfChildren(self.writer._string_table_elem)
expected_output = (
'<string id="SUPPORTED_TESTOS">'
'Supported on Test OS or higher</string>\n'
'<string id="PolicyGroup_group">Test Group Caption</string>')
self.AssertXMLEquals(output, expected_output)
# Assert generated presentation elements.
output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
expected_output = ''
self.AssertXMLEquals(output, expected_output)
  def testMainPolicy(self):
    """A 'main' (boolean) policy yields caption/explain strings and an
    empty presentation element."""
    main_policy = {
      'name': 'DummyMainPolicy',
      'type': 'main',
      'caption': 'Main policy caption',
      'desc': 'Main policy test description.'
    }
    self. _InitWriterForAddingPolicies(self.writer, main_policy)
    self.writer.WritePolicy(main_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="DummyMainPolicy">Main policy caption</string>\n'
        '<string id="DummyMainPolicy_Explain">'
        'Main policy test description.</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = '<presentation id="DummyMainPolicy"/>'
    self.AssertXMLEquals(output, expected_output)
  def testStringPolicy(self):
    """A 'string' policy gets a labelled textBox presentation element."""
    string_policy = {
      'name': 'StringPolicyStub',
      'type': 'string',
      'caption': 'String policy caption',
      'label': 'String policy label',
      'desc': 'This is a test description.',
    }
    self. _InitWriterForAddingPolicies(self.writer, string_policy)
    self.writer.WritePolicy(string_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="StringPolicyStub">String policy caption</string>\n'
        '<string id="StringPolicyStub_Explain">'
        'This is a test description.</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="StringPolicyStub">\n'
        '  <textBox refId="StringPolicyStub">\n'
        '    <label>String policy label</label>\n'
        '  </textBox>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
  def testIntPolicy(self):
    """An 'int' policy gets a decimalTextBox presentation element."""
    int_policy = {
      'name': 'IntPolicyStub',
      'type': 'int',
      'caption': 'Int policy caption',
      'label': 'Int policy label',
      'desc': 'This is a test description.',
    }
    self. _InitWriterForAddingPolicies(self.writer, int_policy)
    self.writer.WritePolicy(int_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="IntPolicyStub">Int policy caption</string>\n'
        '<string id="IntPolicyStub_Explain">'
        'This is a test description.</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="IntPolicyStub">\n'
        '  <decimalTextBox refId="IntPolicyStub">'
        'Int policy label:</decimalTextBox>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
  def testIntEnumPolicy(self):
    """An 'int-enum' policy emits item captions and a dropdownList."""
    enum_policy = {
      'name': 'EnumPolicyStub',
      'type': 'int-enum',
      'caption': 'Enum policy caption',
      'label': 'Enum policy label',
      'desc': 'This is a test description.',
      'items': [
          {
           'name': 'item 1',
           'value': 1,
           'caption': 'Caption Item 1',
          },
          {
           'name': 'item 2',
           'value': 2,
           'caption': 'Caption Item 2',
          },
      ],
    }
    self. _InitWriterForAddingPolicies(self.writer, enum_policy)
    self.writer.WritePolicy(enum_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="EnumPolicyStub">Enum policy caption</string>\n'
        '<string id="EnumPolicyStub_Explain">'
        'This is a test description.</string>\n'
        '<string id="item 1">Caption Item 1</string>\n'
        '<string id="item 2">Caption Item 2</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="EnumPolicyStub">\n'
        '  <dropdownList refId="EnumPolicyStub">'
        'Enum policy label</dropdownList>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
  def testStringEnumPolicy(self):
    """A 'string-enum' policy produces the same ADML shape as 'int-enum'."""
    enum_policy = {
      'name': 'EnumPolicyStub',
      'type': 'string-enum',
      'caption': 'Enum policy caption',
      'label': 'Enum policy label',
      'desc': 'This is a test description.',
      'items': [
          {
           'name': 'item 1',
           'value': 'value 1',
           'caption': 'Caption Item 1',
          },
          {
           'name': 'item 2',
           'value': 'value 2',
           'caption': 'Caption Item 2',
          },
      ],
    }
    self. _InitWriterForAddingPolicies(self.writer, enum_policy)
    self.writer.WritePolicy(enum_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="EnumPolicyStub">Enum policy caption</string>\n'
        '<string id="EnumPolicyStub_Explain">'
        'This is a test description.</string>\n'
        '<string id="item 1">Caption Item 1</string>\n'
        '<string id="item 2">Caption Item 2</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="EnumPolicyStub">\n'
        '  <dropdownList refId="EnumPolicyStub">'
        'Enum policy label</dropdownList>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
  def testListPolicy(self):
    """A 'list' policy adds a 'Desc' string id and a listBox element."""
    list_policy = {
      'name': 'ListPolicyStub',
      'type': 'list',
      'caption': 'List policy caption',
      'label': 'List policy label',
      'desc': 'This is a test description.',
    }
    self. _InitWriterForAddingPolicies(self.writer, list_policy)
    self.writer.WritePolicy(list_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="ListPolicyStub">List policy caption</string>\n'
        '<string id="ListPolicyStub_Explain">'
        'This is a test description.</string>\n'
        '<string id="ListPolicyStubDesc">List policy caption</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="ListPolicyStub">\n'
        '  <listBox refId="ListPolicyStubDesc">List policy label</listBox>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
  def testStringEnumListPolicy(self):
    """A 'string-enum-list' policy renders like 'list': item captions are
    not emitted, only the listBox presentation."""
    list_policy = {
      'name': 'ListPolicyStub',
      'type': 'string-enum-list',
      'caption': 'List policy caption',
      'label': 'List policy label',
      'desc': 'This is a test description.',
      'items': [
          {
           'name': 'item 1',
           'value': 'value 1',
           'caption': 'Caption Item 1',
          },
          {
           'name': 'item 2',
           'value': 'value 2',
           'caption': 'Caption Item 2',
          },
      ],
    }
    self. _InitWriterForAddingPolicies(self.writer, list_policy)
    self.writer.WritePolicy(list_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="ListPolicyStub">List policy caption</string>\n'
        '<string id="ListPolicyStub_Explain">'
        'This is a test description.</string>\n'
        '<string id="ListPolicyStubDesc">List policy caption</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="ListPolicyStub">\n'
        '  <listBox refId="ListPolicyStubDesc">List policy label</listBox>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
  def testDictionaryPolicy(self):
    """A 'dict' policy falls back to a plain labelled textBox."""
    dict_policy = {
      'name': 'DictionaryPolicyStub',
      'type': 'dict',
      'caption': 'Dictionary policy caption',
      'label': 'Dictionary policy label',
      'desc': 'This is a test description.',
    }
    self. _InitWriterForAddingPolicies(self.writer, dict_policy)
    self.writer.WritePolicy(dict_policy)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    expected_output = (
        '<string id="DictionaryPolicyStub">Dictionary policy caption</string>\n'
        '<string id="DictionaryPolicyStub_Explain">'
        'This is a test description.</string>')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="DictionaryPolicyStub">\n'
        '  <textBox refId="DictionaryPolicyStub">\n'
        '    <label>Dictionary policy label</label>\n'
        '  </textBox>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
def testPlatform(self):
# Test that the writer correctly chooses policies of platform Windows.
self.assertTrue(self.writer.IsPolicySupported({
'supported_on': [
{'platforms': ['win', 'zzz']}, {'platforms': ['aaa']}
]
}))
self.assertFalse(self.writer.IsPolicySupported({
'supported_on': [
{'platforms': ['mac', 'linux']}, {'platforms': ['aaa']}
]
}))
  def testStringEncodings(self):
    """Dots in policy and item names must be encoded as '_' in string ids,
    and an item id already emitted ('tls1_2') must not be duplicated.
    Presentation ids, in contrast, keep the original dotted names."""
    enum_policy_a = {
      'name': 'EnumPolicy.A',
      'type': 'string-enum',
      'caption': 'Enum policy A caption',
      'label': 'Enum policy A label',
      'desc': 'This is a test description.',
      'items': [
          {
           'name': 'tls1.2',
           'value': 'tls1.2',
           'caption': 'tls1.2',
          }
      ],
    }
    enum_policy_b = {
      'name': 'EnumPolicy.B',
      'type': 'string-enum',
      'caption': 'Enum policy B caption',
      'label': 'Enum policy B label',
      'desc': 'This is a test description.',
      'items': [
          {
           'name': 'tls1.2',
           'value': 'tls1.2',
           'caption': 'tls1.2',
          }
      ],
    }
    self. _InitWriterForAddingPolicies(self.writer, enum_policy_a)
    self.writer.WritePolicy(enum_policy_a)
    self.writer.WritePolicy(enum_policy_b)
    # Assert generated string elements.
    output = self.GetXMLOfChildren(self.writer._string_table_elem)
    # Note: the shared item string 'tls1_2' appears only once, after policy A.
    expected_output = (
        '<string id="EnumPolicy_A">Enum policy A caption</string>\n'
        '<string id="EnumPolicy_A_Explain">'
        'This is a test description.</string>\n'
        '<string id="tls1_2">tls1.2</string>\n'
        '<string id="EnumPolicy_B">Enum policy B caption</string>\n'
        '<string id="EnumPolicy_B_Explain">'
        'This is a test description.</string>\n')
    self.AssertXMLEquals(output, expected_output)
    # Assert generated presentation elements.
    output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
    expected_output = (
        '<presentation id="EnumPolicy.A">\n'
        '  <dropdownList refId="EnumPolicy.A">'
        'Enum policy A label</dropdownList>\n'
        '</presentation>\n'
        '<presentation id="EnumPolicy.B">\n'
        '  <dropdownList refId="EnumPolicy.B">'
        'Enum policy B label</dropdownList>\n'
        '</presentation>')
    self.AssertXMLEquals(output, expected_output)
# Allow running this test suite directly from the command line.
if __name__ == '__main__':
  unittest.main()
| bsd-2-clause |
maurerpe/FreeCAD | src/Mod/TemplatePyMod/InitGui.py | 58 | 1410 | # TemplatePyMod gui init module
# (c) 2007 Juergen Riegel LGPL
#
class TemplatePyModWorkbench ( Workbench ):
    """Sample FreeCAD workbench demonstrating the Python module template.

    Registers a toolbar, a command bar and a menu built from the
    TemplatePyMod_* commands defined in the Commands module.
    """
    # 16x16 XPM icon shown in the workbench selector.
    Icon = """
/* XPM */
static const char *test_icon[]={
"16 16 2 1",
"a c #000000",
". c None",
"................",
"................",
"..############..",
"..############..",
"..############..",
"......####......",
"......####......",
"......####......",
"......####......",
"......####......",
"......####......",
"......####......",
"......####......",
"......####......",
"................",
"................"};
"""
    MenuText = "Python sandbox"
    ToolTip = "Python template workbench"

    def Initialize(self):
        """Build the workbench GUI: toolbar, command bar and menu.

        Called once by FreeCAD when the workbench is first activated.
        """
        import Commands  # registers the TemplatePyMod_* commands as a side effect
        self.appendToolbar("TemplateTools",["TemplatePyMod_Cmd1","TemplatePyMod_Cmd2","TemplatePyMod_Cmd3","TemplatePyMod_Cmd4","TemplatePyMod_Cmd5"])
        menu = ["ModulePy &Commands","PyModuleCommands"]
        # Renamed from 'list' to avoid shadowing the builtin.
        commands = ["TemplatePyMod_Cmd1","TemplatePyMod_Cmd2","TemplatePyMod_Cmd3","TemplatePyMod_Cmd5","TemplatePyMod_Cmd6"]
        self.appendCommandbar("PyModuleCommands",commands)
        self.appendMenu(menu,commands)
        Log ('Loading TemplatePyMod module... done\n')

    def Activated(self):
        """Called by FreeCAD whenever the user switches to this workbench."""
        Msg("TemplatePyModWorkbench::Activated()\n")

    def Deactivated(self):
        """Called by FreeCAD whenever the user switches away from this workbench."""
        Msg("TemplatePyModWorkbench::Deactivated()\n")
# Register the workbench with the FreeCAD GUI so it shows up in the selector.
Gui.addWorkbench(TemplatePyModWorkbench)
| lgpl-2.1 |
dhermes/gcloud-python | bigquery/google/cloud/bigquery/_helpers.py | 2 | 14907 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helper functions for BigQuery API classes."""
import base64
import datetime
import decimal
from google.cloud._helpers import UTC
from google.cloud._helpers import _date_from_iso8601_date
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _microseconds_from_datetime
from google.cloud._helpers import _RFC3339_NO_FRACTION
from google.cloud._helpers import _to_bytes
_RFC3339_MICROS_NO_ZULU = "%Y-%m-%dT%H:%M:%S.%f"
_TIMEONLY_WO_MICROS = "%H:%M:%S"
_TIMEONLY_W_MICROS = "%H:%M:%S.%f"
def _not_null(value, field):
"""Check whether 'value' should be coerced to 'field' type."""
return value is not None or field.mode != "NULLABLE"
def _int_from_json(value, field):
"""Coerce 'value' to an int, if set or not nullable."""
if _not_null(value, field):
return int(value)
def _float_from_json(value, field):
"""Coerce 'value' to a float, if set or not nullable."""
if _not_null(value, field):
return float(value)
def _decimal_from_json(value, field):
"""Coerce 'value' to a Decimal, if set or not nullable."""
if _not_null(value, field):
return decimal.Decimal(value)
def _bool_from_json(value, field):
"""Coerce 'value' to a bool, if set or not nullable."""
if _not_null(value, field):
return value.lower() in ["t", "true", "1"]
def _string_from_json(value, _):
"""NOOP string -> string coercion"""
return value
def _bytes_from_json(value, field):
"""Base64-decode value"""
if _not_null(value, field):
return base64.standard_b64decode(_to_bytes(value))
def _timestamp_from_json(value, field):
    """Coerce 'value' to a datetime, if set or not nullable."""
    if not _not_null(value, field):
        return None
    # The wire value is seconds since the epoch (UTC), to microsecond
    # precision, serialized as a stringified float.
    return _datetime_from_microseconds(1e6 * float(value))


def _timestamp_query_param_from_json(value, field):
    """Coerce 'value' to a datetime, if set or not nullable.

    Args:
        value (str): The timestamp.
        field (.SchemaField): The field corresponding to the value.

    Returns:
        Optional[datetime.datetime]: The parsed datetime object from
        ``value`` if the ``field`` is not null (otherwise it is
        :data:`None`).
    """
    if not _not_null(value, field):
        return None
    # Canonical formats for timestamps in BigQuery are flexible. See:
    # g.co/cloud/bigquery/docs/reference/standard-sql/data-types#timestamp-type
    # Normalize: the date/time separator may be 'T' or ' ', and the UTC
    # zone may be spelled 'Z' or '+00:00'.
    value = value.replace(" ", "T", 1)
    value = value.replace("Z", "").replace("+00:00", "")
    # Choose the format by presence of fractional seconds.
    fmt = _RFC3339_MICROS_NO_ZULU if "." in value else _RFC3339_NO_FRACTION
    return datetime.datetime.strptime(value, fmt).replace(tzinfo=UTC)
def _datetime_from_json(value, field):
    """Coerce 'value' to a datetime, if set or not nullable.

    Args:
        value (str): The timestamp.
        field (.SchemaField): The field corresponding to the value.

    Returns:
        Optional[datetime.datetime]: The parsed datetime object from
        ``value`` if the ``field`` is not null (otherwise it is
        :data:`None`).
    """
    if not _not_null(value, field):
        return None
    # With fractional seconds: YYYY-MM-DDTHH:MM:SS.ffffff, else plain
    # YYYY-MM-DDTHH:MM:SS.
    fmt = _RFC3339_MICROS_NO_ZULU if "." in value else _RFC3339_NO_FRACTION
    return datetime.datetime.strptime(value, fmt)


def _date_from_json(value, field):
    """Coerce 'value' to a datetime date, if set or not nullable."""
    if not _not_null(value, field):
        return None
    # The wire format is an ISO-8601 date string, YYYY-MM-DD.
    return _date_from_iso8601_date(value)


def _time_from_json(value, field):
    """Coerce 'value' to a datetime time, if set or not nullable."""
    if not _not_null(value, field):
        return None
    if len(value) == 8:  # HH:MM:SS
        fmt = _TIMEONLY_WO_MICROS
    elif len(value) == 15:  # HH:MM:SS.micros
        fmt = _TIMEONLY_W_MICROS
    else:
        raise ValueError("Unknown time format: {}".format(value))
    return datetime.datetime.strptime(value, fmt).time()
def _record_from_json(value, field):
    """Coerce 'value' to a mapping, if set or not nullable.

    'value' is the raw API cell for a RECORD column: ``{"f": [cell, ...]}``
    with one cell per subfield; returns a dict keyed by subfield name
    (implicitly None for a NULL record).
    """
    if _not_null(value, field):
        record = {}
        record_iter = zip(field.fields, value["f"])
        for subfield, cell in record_iter:
            converter = _CELLDATA_FROM_JSON[subfield.field_type]
            if subfield.mode == "REPEATED":
                # Repeated cells arrive as {"v": [{"v": item}, ...]}.
                value = [converter(item["v"], subfield) for item in cell["v"]]
            else:
                value = converter(cell["v"], subfield)
            # NOTE(review): 'value' is rebound here, shadowing the parameter.
            # Harmless because the parameter was fully consumed by zip()
            # above, but a distinct local name would be clearer.
            record[subfield.name] = value
        return record
# Dispatch table mapping BigQuery field types (legacy and standard-SQL
# names) to the row-cell deserializers defined above.
_CELLDATA_FROM_JSON = {
    "INTEGER": _int_from_json,
    "INT64": _int_from_json,
    "FLOAT": _float_from_json,
    "FLOAT64": _float_from_json,
    "NUMERIC": _decimal_from_json,
    "BOOLEAN": _bool_from_json,
    "BOOL": _bool_from_json,
    "STRING": _string_from_json,
    "GEOGRAPHY": _string_from_json,
    "BYTES": _bytes_from_json,
    "TIMESTAMP": _timestamp_from_json,
    "DATETIME": _datetime_from_json,
    "DATE": _date_from_json,
    "TIME": _time_from_json,
    "RECORD": _record_from_json,
}
# Query parameters share the row converters, except TIMESTAMP which uses
# the RFC-3339 string form instead of epoch seconds.
_QUERY_PARAMS_FROM_JSON = dict(_CELLDATA_FROM_JSON)
_QUERY_PARAMS_FROM_JSON["TIMESTAMP"] = _timestamp_query_param_from_json
def _field_to_index_mapping(schema):
"""Create a mapping from schema field name to index of field."""
return {f.name: i for i, f in enumerate(schema)}
def _row_tuple_from_json(row, schema):
"""Convert JSON row data to row with appropriate types.
Note: ``row['f']`` and ``schema`` are presumed to be of the same length.
:type row: dict
:param row: A JSON response row to be converted.
:type schema: tuple
:param schema: A tuple of
:class:`~google.cloud.bigquery.schema.SchemaField`.
:rtype: tuple
:returns: A tuple of data converted to native types.
"""
row_data = []
for field, cell in zip(schema, row["f"]):
converter = _CELLDATA_FROM_JSON[field.field_type]
if field.mode == "REPEATED":
row_data.append([converter(item["v"], field) for item in cell["v"]])
else:
row_data.append(converter(cell["v"], field))
return tuple(row_data)
def _rows_from_json(values, schema):
"""Convert JSON row data to rows with appropriate types."""
from google.cloud.bigquery import Row
field_to_index = _field_to_index_mapping(schema)
return [Row(_row_tuple_from_json(r, schema), field_to_index) for r in values]
def _int_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, int):
value = str(value)
return value
def _float_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
return value
def _decimal_to_json(value):
"""Coerce 'value' to a JSON-compatible representation."""
if isinstance(value, decimal.Decimal):
value = str(value)
return value
def _bool_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, bool):
value = "true" if value else "false"
return value
def _bytes_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, bytes):
value = base64.standard_b64encode(value).decode("ascii")
return value
def _timestamp_to_json_parameter(value):
"""Coerce 'value' to an JSON-compatible representation.
This version returns the string representation used in query parameters.
"""
if isinstance(value, datetime.datetime):
if value.tzinfo not in (None, UTC):
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
value = "%s %s+00:00" % (value.date().isoformat(), value.time().isoformat())
return value
def _timestamp_to_json_row(value):
"""Coerce 'value' to an JSON-compatible representation.
This version returns floating-point seconds value used in row data.
"""
if isinstance(value, datetime.datetime):
value = _microseconds_from_datetime(value) * 1e-6
return value
def _datetime_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, datetime.datetime):
value = value.strftime(_RFC3339_MICROS_NO_ZULU)
return value
def _date_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, datetime.date):
value = value.isoformat()
return value
def _time_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, datetime.time):
value = value.isoformat()
return value
# Converters used for scalar values marshalled as row data.
# Keys cover both legacy (INTEGER, FLOAT, BOOLEAN) and standard-SQL
# (INT64, FLOAT64, BOOL) type names.
_SCALAR_VALUE_TO_JSON_ROW = {
    "INTEGER": _int_to_json,
    "INT64": _int_to_json,
    "FLOAT": _float_to_json,
    "FLOAT64": _float_to_json,
    "NUMERIC": _decimal_to_json,
    "BOOLEAN": _bool_to_json,
    "BOOL": _bool_to_json,
    "BYTES": _bytes_to_json,
    "TIMESTAMP": _timestamp_to_json_row,
    "DATETIME": _datetime_to_json,
    "DATE": _date_to_json,
    "TIME": _time_to_json,
}
# Converters used for scalar values marshalled as query parameters.
# Identical to the row converters except TIMESTAMP, which uses the
# RFC-3339 string form rather than epoch seconds.
_SCALAR_VALUE_TO_JSON_PARAM = _SCALAR_VALUE_TO_JSON_ROW.copy()
_SCALAR_VALUE_TO_JSON_PARAM["TIMESTAMP"] = _timestamp_to_json_parameter
def _snake_to_camel_case(value):
"""Convert snake case string to camel case."""
words = value.split("_")
return words[0] + "".join(map(str.capitalize, words[1:]))
def _get_sub_prop(container, keys, default=None):
"""Get a nested value from a dictionary.
This method works like ``dict.get(key)``, but for nested values.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to get the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
default (object):
(Optional) Value to returned if any of the keys are not found.
Defaults to ``None``.
Examples:
Get a top-level value (equivalent to ``container.get('key')``).
>>> _get_sub_prop({'key': 'value'}, ['key'])
'value'
Get a top-level value, providing a default (equivalent to
``container.get('key', default='default')``).
>>> _get_sub_prop({'nothere': 123}, ['key'], default='not found')
'not found'
Get a nested value.
>>> _get_sub_prop({'key': {'subkey': 'value'}}, ['key', 'subkey'])
'value'
Returns:
object: The value if present or the default.
"""
sub_val = container
for key in keys:
if key not in sub_val:
return default
sub_val = sub_val[key]
return sub_val
def _set_sub_prop(container, keys, value):
"""Set a nested value in a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to set the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
value (object): Value to set within the container.
Examples:
Set a top-level value (equivalent to ``container['key'] = 'value'``).
>>> container = {}
>>> _set_sub_prop(container, ['key'], 'value')
>>> container
{'key': 'value'}
Set a nested value.
>>> container = {}
>>> _set_sub_prop(container, ['key', 'subkey'], 'value')
>>> container
{'key': {'subkey': 'value'}}
Replace a nested value.
>>> container = {'key': {'subkey': 'prev'}}
>>> _set_sub_prop(container, ['key', 'subkey'], 'new')
>>> container
{'key': {'subkey': 'new'}}
"""
sub_val = container
for key in keys[:-1]:
if key not in sub_val:
sub_val[key] = {}
sub_val = sub_val[key]
sub_val[keys[-1]] = value
def _del_sub_prop(container, keys):
"""Remove a nested key fro a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to clear the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
Examples:
Remove a top-level value (equivalent to ``del container['key']``).
>>> container = {'key': 'value'}
>>> _del_sub_prop(container, ['key'])
>>> container
{}
Remove a nested value.
>>> container = {'key': {'subkey': 'value'}}
>>> _del_sub_prop(container, ['key', 'subkey'])
>>> container
{'key': {}}
"""
sub_val = container
for key in keys[:-1]:
if key not in sub_val:
sub_val[key] = {}
sub_val = sub_val[key]
if keys[-1] in sub_val:
del sub_val[keys[-1]]
def _int_or_none(value):
"""Helper: deserialize int value from JSON string."""
if isinstance(value, int):
return value
if value is not None:
return int(value)
def _str_or_none(value):
"""Helper: serialize value to JSON string."""
if value is not None:
return str(value)
| apache-2.0 |
adityaseven/libopencm3 | scripts/data/lpc43xx/csv2yaml.py | 42 | 1117 | #!/usr/bin/env python
import sys
import yaml
import csv
from collections import OrderedDict
import yaml_odict
def convert_file(fname):
    """Convert one register-description CSV file into a YAML file.

    Each CSV row must contain exactly:
        register_name, lsb, width, field_name, description, reset_value, access
    Rows are grouped by register name and written next to the input as
    ``<name>.yaml``. Raises RuntimeError on a duplicate field name.
    """
    registers = OrderedDict()
    # Fix: open the input with a context manager (the handle was leaked).
    with open(fname, 'r') as csv_file:
        reader = csv.reader(csv_file)
        for register_name, lsb, width, field_name, description, reset_value, access in reader:
            if register_name not in registers:
                registers[register_name] = {
                    'fields': OrderedDict(),
                }
            register = registers[register_name]
            fields = register['fields']
            if field_name in fields:
                # Fix: the format arguments were not parenthesized, so the
                # original raise crashed with a TypeError ("not enough
                # arguments for format string") instead of this message.
                raise RuntimeError(
                    'Duplicate field name "%s" in register "%s"' %
                    (field_name, register_name))
            fields[field_name] = {
                'lsb': int(lsb),
                'width': int(width),
                'description': description,
                'reset_value': reset_value,
                'access': access,
            }
    with open(fname.replace('.csv', '.yaml'), 'w') as out_file:
        yaml.dump(registers, out_file, default_flow_style=False)
# Entry point: every command-line argument is a CSV file to convert.
for fname in sys.argv[1:]:
    convert_file(fname)
| gpl-3.0 |
leandrohrb/Anti-Baby-Bot | TelegramBot.py | 1 | 9237 | #!/usr/bin/python
### Import libraries ###
import sys
import time
import telepot
import datetime
import json
import os
from User import User
from threading import Thread
import time
import pytz
### Create users dictionary ###
users = {}
### Get bot token ###
def getToken (file):
    """Read the Telegram bot token from *file*, dropping all newline characters."""
    with open(file, "r") as token_file:
        return token_file.read().replace('\n', '')
### Change user time to receive alerts ###
def changeTime (userId, choosenTime):
    """Parse *choosenTime* ("HH:MM") and update the user's alert time.

    On success, both the permanent time (hour/minute) and today's pending
    alert (messageHour/messageMinute) are updated, the change is persisted
    via modifyTimeUser(), and a confirmation is sent. On a parse failure
    the user is asked to retry. Relies on the module-level 'users' dict
    and 'bot' instance.
    """
    # check if time is valid and then change the alert time
    try:
        # transform string to time object
        choosenTime = time.strptime(choosenTime, "%H:%M")
        # change everyday time to receive alert
        users[userId].hour = choosenTime.tm_hour
        users[userId].minute = choosenTime.tm_min
        # change today time to receive alert
        users[userId].messageHour = users[userId].hour
        users[userId].messageMinute = users[userId].minute
        # convert time to string, to print current time of alert receiving
        # (round-trips hh:mm through seconds-since-midnight to format it)
        printTime = users[userId].hour*3600 + users[userId].minute*60
        printTime = time.gmtime(printTime)
        printTime = time.strftime("%H:%M", printTime)
        # warn the user that he changed the time
        messageToUser = "Alright, now the upcoming alerts I'll send you will be at %s" %printTime
        bot.sendMessage(userId, messageToUser)
        modifyTimeUser(userId, choosenTime.tm_hour, choosenTime.tm_min)
    # if the time is not valid, warn the user
    except ValueError:
        messageToUser = "You didn't send me a valid time. Type /time and try it again"
        bot.sendMessage(userId, messageToUser)
### database functions
# delete a user from database by userId
def deleteUser(id):
    """Remove user *id* from memory and rewrite the database file.

    The on-disk format is append-only JSON lines, so the file is
    truncated and regenerated from the remaining in-memory users.
    Fixes: the previous version opened the file in "w" mode without ever
    closing the handle (leak), and carried dead commented-out code.
    """
    del users[id]
    # Truncate the database, then re-append every remaining user.
    open("database.id", "w").close()
    for user in users:
        insertUser(users[user].userId, users[user].userName,
                   users[user].messageHour, users[user].messageMinute)
# modify time info about some user in database by userId
def modifyTimeUser(id, hour, minute):
    """Persist a new pending alert time for user *id*.

    Updates the in-memory user, then rewrites the whole database file.
    Fixes the leaked open "w" handle of the previous version.
    """
    users[id].messageHour = hour
    users[id].messageMinute = minute
    # Truncate the database, then re-append every user with current data.
    open("database.id", "w").close()
    for user in users:
        insertUser(users[user].userId, users[user].userName,
                   users[user].messageHour, users[user].messageMinute)
### Insert user in database.id using json ###
def insertUser(id, name, hour, minute):
    """Append one user record as a JSON line to database.id."""
    record = json.dumps({'id' : id, 'name': name, 'hour' : hour, 'minute' : minute })
    with open("database.id", "a") as database:
        database.write(record + "\n")
### Load users from database.id and add them to users dictionary ###
def loadUsers():
    """Populate the global 'users' dict from database.id (JSON lines).

    Creates the database file if it does not exist yet, and spawns one
    checkTime polling thread per loaded user.
    """
    # create database.id if it doesn't exist yet
    if not(os.path.isfile("database.id")):
        open("database.id", "w").close()
    with open("database.id", "r") as database:
        for user in database:
            u = json.loads(user)
            newUser = User(u['id'], u['name'], u['hour'], u['minute'])
            users[u['id']] = newUser
            # one watcher thread per user; it exits when the user is removed
            thread = Thread(target = checkTime, args = (u['id'],))
            thread.start()
### Handle messages reiceved from users ###
def chatMessage (message):
    """Dispatch one incoming Telegram message.

    Handles the commands /start, /about, /stop and /time, plus the two
    stateful flows: answering a pill alert (askFlag) and supplying a new
    alert time (timeFlag). Relies on the module-level 'users' dict and
    'bot' instance.
    """
    # get the user id
    userId = message['chat']['id']
    # get the user name
    userName = message['chat']['first_name']
    # get the text
    text = message['text']
    # get the time now (on SP)
    # NOTE(review): this timeNow is recomputed in the askFlag branch below,
    # so the value bound here is effectively unused.
    timeZone = pytz.timezone('Brazil/East')
    timeNow = datetime.datetime.now(timeZone)
    # start the user
    if text == '/start':
        # welcome him and explain how the bot works
        messageToUser = "Hello! I'll help you to remember to take the contraceptive pills."
        messageToUser+= "\nTo change the time to receive alerts, type /time;"
        messageToUser+= "\nTo stop me, type /stop."
        bot.sendMessage(userId, messageToUser)
        # if the user is not already in the dictionary, put it there
        # (with the 08:00 default alert time)
        # and then spawn a thread to keep checking if the time to send alert has arrived
        if userId not in users:
            newUser = User(userId, userName, 8, 0)
            users[userId] = newUser
            thread = Thread(target = checkTime, args = (userId,))
            thread.start()
        # check if the user is already in the database
        # if he's not, put it in
        userAlreadyOnDatabase = False
        with open("database.id", "r") as database:
            for user in database:
                u = json.loads(user)
                if u['id'] == userId:
                    userAlreadyOnDatabase = True
        if not userAlreadyOnDatabase:
            insertUser(userId, userName, users[userId].messageHour, users[userId].messageMinute)
    # give some info about the bot
    elif text == '/about':
        messageToUser = "This bot is a free software under GPL v3 and comes without any warranty."
        messageToUser+= "\nCheck the code in https://git.io/vDSYp"
        messageToUser+= "\nFor more infomation, talk to the devs:"
        messageToUser+= "\n@andrealmeid"
        messageToUser+= "\n@leandrohrb"
        bot.sendMessage(userId, messageToUser)
    # the user don't want to receive alerts anymore
    elif userId in users and text == '/stop':
        messageToUser = "If you ever want to receive alerts from me again, type /start"
        messageToUser+= "\nBye bye!"
        bot.sendMessage(userId, messageToUser)
        deleteUser(userId)
    # the user answered if he took the pills or not
    elif userId in users and users[userId].askFlag == 1:
        timeZone = pytz.timezone('Brazil/East')
        timeNow = datetime.datetime.now(timeZone)
        rememberMessage(bot, text, userId, timeNow)
    # the user asked to change the time to receive the alerts
    elif userId in users and text == '/time':
        users[userId].timeFlag = 1
        # convert time to string, to print current time of alert receiving
        printTime = users[userId].hour*3600 + users[userId].minute*60
        printTime = time.gmtime(printTime)
        printTime = time.strftime("%H:%M", printTime)
        # send the message
        messageToUser = "By now, I send you alerts at %s." %printTime
        messageToUser+= "\nTell me the time you want to receive the alerts, in the format HH:MM. For example, '09:30'"
        bot.sendMessage(userId, messageToUser)
    # change the message time (text is the time typed by user, in string format)
    elif userId in users and users[userId].timeFlag == 1:
        changeTime(userId, text)
        users[userId].timeFlag = 0
    # this bot don't like humans, so he won't answer anything else
    elif userId in users:
        messageToUser = "I don't speak humanoide"
        bot.sendMessage(userId, messageToUser)
### Check the time to send the alert message (it's a thread target) ###
def checkTime (userId):
    """Thread target: poll until it is time to alert *userId*.

    Polls roughly every 20 seconds; when the pending alert time matches
    the current Brazil/East wall clock, sends the alert, resets the
    pending time back to the everyday time, and sleeps past the minute
    so the same alert is not re-sent. Exits when the user is removed
    from the global 'users' dict.
    """
    # keep the thread running
    while True:
        # check if this user still in dictionary, if dont, stop thread
        if userId not in users:
            return
        # send the contraceptive alert, if the time is correct
        timeZone = pytz.timezone('Brazil/East')
        timeNow = datetime.datetime.now(timeZone)
        if timeNow.hour == users[userId].messageHour and timeNow.minute == users[userId].messageMinute:
            alertMessage(userId, bot)
            users[userId].messageHour, users[userId].messageMinute = users[userId].hour, users[userId].minute
            # sleep a full minute so this alert minute is not matched twice
            time.sleep(60)
        # if the time is not correct, just wait
        else:
            time.sleep(20)
### Send the alert message for the user ###
def alertMessage (userId, bot):
    """Ask the user whether the pills were taken and mark the question as pending."""
    bot.sendMessage(userId, "Did you take the pills? Please answer, 'Yes' or 'No'")
    # the next message from this user will be routed to rememberMessage()
    users[userId].askFlag = 1
### Send the remember message after 30 minutes, if the user did not took the pills ###
def rememberMessage (bot, text, userId, timeNow):
# the user took the pills, congratz him
if text == 'yes' or text == 'Yes':
messageToUser = "No babies for you, congratulations!!!"
bot.sendMessage(userId, messageToUser)
users[userId].askFlag = 0
# the user did not took the pills, warn him in 30 minutes
elif text == 'no' or text == 'No':
messageToUser = "Hmmm... this is bad. I don't like babies. I'll remember you in 30 minutes"
bot.sendMessage(userId, messageToUser)
newTime = timeNow + datetime.timedelta(minutes=30)
users[userId].messageHour, users[userId].messageMinute = newTime.hour, newTime.minute
users[userId].askFlag = 0
# if the user answered anything else than 'yes' or 'no', make him answer
else:
messageToUser = "Please answer, 'Yes' or 'No'?"
bot.sendMessage(userId, messageToUser)
users[userId].askFlag = 1
###############
### Program ###
###############
# Entry point: wire the bot together, start the message loop, and block
# forever so the daemon threads keep running.
### get bot token ###
token = getToken ("token.id")
# create bot with its token
bot = telepot.Bot(token)
# load users from database (also spawns a checkTime thread per user)
loadUsers()
# get inputs from users and handle them; telepot dispatches each incoming
# chat message to chatMessage() on a background thread
bot.message_loop(chatMessage)
# keep the main thread alive so the message loop keeps running
while True:
    time.sleep(60)
| gpl-3.0 |
mapr/hue | desktop/core/ext-py/Django-1.6.10/tests/aggregation/tests.py | 59 | 21295 | from __future__ import absolute_import
import datetime
from decimal import Decimal
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase, Approximate
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
    """Exercise QuerySet.aggregate() and .annotate() over the aggregation
    fixtures (Author/Publisher/Book/Store loaded from aggregation.json)."""

    fixtures = ["aggregation.json"]

    def test_empty_aggregate(self):
        """aggregate() with no arguments returns an empty dict."""
        self.assertEqual(Author.objects.all().aggregate(), {})

    def test_single_aggregate(self):
        """A single aggregate yields one '<field>__<agg>' key."""
        vals = Author.objects.aggregate(Avg("age"))
        self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})

    def test_multiple_aggregates(self):
        """Several aggregates can be computed in a single query."""
        vals = Author.objects.aggregate(Sum("age"), Avg("age"))
        self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})

    def test_filter_aggregate(self):
        """Aggregation respects a preceding filter()."""
        vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["age__sum"], 254)

    def test_related_aggregate(self):
        """Aggregates can traverse m2m, reverse FK, and forward FK relations."""
        vals = Author.objects.aggregate(Avg("friends__age"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)

        vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)

        vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__rating__avg"], 4.0)

        vals = Book.objects.aggregate(Sum("publisher__num_awards"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["publisher__num_awards__sum"], 30)

        vals = Publisher.objects.aggregate(Sum("book__price"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__price__sum"], Decimal("270.27"))

    def test_aggregate_multi_join(self):
        """Aggregates can span more than one join."""
        vals = Store.objects.aggregate(Max("books__authors__age"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["books__authors__age__max"], 57)

        vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
        self.assertEqual(len(vals), 1)
        self.assertEqual(vals["book__publisher__num_awards__min"], 1)

    def test_aggregate_alias(self):
        """A keyword argument names the aggregate in the result dict."""
        vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
        self.assertEqual(len(vals), 1)
        self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)

    def test_annotate_basic(self):
        """annotate() with and without arguments; annotations appear on instances."""
        self.assertQuerysetEqual(
            Book.objects.annotate().order_by('pk'), [
                "The Definitive Guide to Django: Web Development Done Right",
                "Sams Teach Yourself Django in 24 Hours",
                "Practical Django Projects",
                "Python Web Development with Django",
                "Artificial Intelligence: A Modern Approach",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
            ],
            lambda b: b.name
        )

        books = Book.objects.annotate(mean_age=Avg("authors__age"))
        b = books.get(pk=1)
        self.assertEqual(
            b.name,
            'The Definitive Guide to Django: Web Development Done Right'
        )
        self.assertEqual(b.mean_age, 34.5)

    def test_annotate_m2m(self):
        """Annotations over a many-to-many relation (default and aliased)."""
        books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 51.5),
                ('Practical Django Projects', 29.0),
                ('Python Web Development with Django', Approximate(30.3, places=1)),
                ('Sams Teach Yourself Django in 24 Hours', 45.0)
            ],
            lambda b: (b.name, b.authors__age__avg),
        )

        books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ],
            lambda b: (b.name, b.num_authors)
        )

    def test_backwards_m2m_annotate(self):
        """Annotations over the reverse side of a many-to-many relation."""
        authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 4.5),
                ('Brad Dayley', 3.0),
                ('Jacob Kaplan-Moss', 4.5),
                ('James Bennett', 4.0),
                ('Paul Bissex', 4.0),
                ('Stuart Russell', 4.0)
            ],
            lambda a: (a.name, a.book__rating__avg)
        )

        authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 1),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 1),
                ('Peter Norvig', 2),
                ('Stuart Russell', 1),
                ('Wesley J. Chun', 1)
            ],
            lambda a: (a.name, a.num_books)
        )

    def test_reverse_fkey_annotate(self):
        """Annotations over forward and reverse foreign keys."""
        books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
        self.assertQuerysetEqual(
            books, [
                ('Artificial Intelligence: A Modern Approach', 7),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
                ('Practical Django Projects', 3),
                ('Python Web Development with Django', 7),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 3)
            ],
            lambda b: (b.name, b.publisher__num_awards__sum)
        )

        publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
        self.assertQuerysetEqual(
            publishers, [
                ('Apress', Decimal("59.69")),
                ("Jonno's House of Books", None),
                ('Morgan Kaufmann', Decimal("75.00")),
                ('Prentice Hall', Decimal("112.49")),
                ('Sams', Decimal("23.09"))
            ],
            lambda p: (p.name, p.book__price__sum)
        )

    def test_annotate_values(self):
        """Interaction of annotate() with values() in either order."""
        books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
        self.assertEqual(
            books, [
                {
                    "contact_id": 1,
                    "id": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": 1,
                    "rating": 4.5,
                }
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
        self.assertEqual(
            list(books), [
                {
                    "pk": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                }
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
        self.assertEqual(
            list(books), [
                {
                    "name": "The Definitive Guide to Django: Web Development Done Right"
                }
            ]
        )

        books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
        self.assertEqual(
            list(books), [
                {
                    "contact_id": 1,
                    "id": 1,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": "The Definitive Guide to Django: Web Development Done Right",
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": 1,
                    "rating": 4.5,
                }
            ]
        )

        # values() before annotate() groups by the values() fields.
        books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
        self.assertEqual(
            list(books), [
                {
                    "rating": 3.0,
                    "n_authors": 1,
                    "mean_age": 45.0,
                },
                {
                    "rating": 4.0,
                    "n_authors": 6,
                    "mean_age": Approximate(37.16, places=1)
                },
                {
                    "rating": 4.5,
                    "n_authors": 2,
                    "mean_age": 34.5,
                },
                {
                    "rating": 5.0,
                    "n_authors": 1,
                    "mean_age": 57.0,
                }
            ]
        )

        authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
        self.assertEqual(len(authors), 9)
        self.assertQuerysetEqual(
            authors, [
                ('Adrian Holovaty', 32.0),
                ('Brad Dayley', None),
                ('Jacob Kaplan-Moss', 29.5),
                ('James Bennett', 34.0),
                ('Jeffrey Forcier', 27.0),
                ('Paul Bissex', 31.0),
                ('Peter Norvig', 46.0),
                ('Stuart Russell', 57.0),
                ('Wesley J. Chun', Approximate(33.66, places=1))
            ],
            lambda a: (a.name, a.friends__age__avg)
        )

    def test_count(self):
        """Count() with and without distinct=True."""
        vals = Book.objects.aggregate(Count("rating"))
        self.assertEqual(vals, {"rating__count": 6})

        vals = Book.objects.aggregate(Count("rating", distinct=True))
        self.assertEqual(vals, {"rating__count": 4})

    def test_fkey_aggregate(self):
        """Counting a relation's pk explicitly equals counting the relation."""
        explicit = list(Author.objects.annotate(Count('book__id')))
        implicit = list(Author.objects.annotate(Count('book')))
        self.assertEqual(explicit, implicit)

    def test_annotate_ordering(self):
        """order_by() can reference an annotation, ascending and descending."""
        books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
        self.assertEqual(
            list(books), [
                {
                    "rating": 4.5,
                    "oldest": 35,
                },
                {
                    "rating": 3.0,
                    "oldest": 45
                },
                {
                    "rating": 4.0,
                    "oldest": 57,
                },
                {
                    "rating": 5.0,
                    "oldest": 57,
                }
            ]
        )

        books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
        self.assertEqual(
            list(books), [
                {
                    "rating": 5.0,
                    "oldest": 57,
                },
                {
                    "rating": 4.0,
                    "oldest": 57,
                },
                {
                    "rating": 3.0,
                    "oldest": 45,
                },
                {
                    "rating": 4.5,
                    "oldest": 35,
                }
            ]
        )

    def test_aggregate_annotation(self):
        """An aggregate can be computed over an annotation."""
        vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
        self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})

    def test_filtering(self):
        """filter() on annotations before/after annotate(), plus range/in/isnull."""
        p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
        Book.objects.create(
            name='ExpensiveBook1',
            pages=1,
            isbn='111',
            rating=3.5,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008,12,1)
        )
        Book.objects.create(
            name='ExpensiveBook2',
            pages=1,
            isbn='222',
            rating=4.0,
            price=Decimal("1000"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008,12,2)
        )
        Book.objects.create(
            name='ExpensiveBook3',
            pages=1,
            isbn='333',
            rating=4.5,
            price=Decimal("35"),
            publisher=p,
            contact_id=1,
            pubdate=datetime.date(2008,12,3)
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Apress",
                "Sams",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        # Filtering before annotating restricts the rows the annotation sees.
        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Sams",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )

        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
        self.assertEqual(len(publishers), 0)

    def test_annotation(self):
        """Annotations are filterable and usable alongside relation filters."""
        vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
        self.assertEqual(vals, {"friends__id__count": 2})

        # NOTE: this filter originally used the invalid lookup
        # 'num_authors__ge'. Django <= 1.6's lookup parser silently treats an
        # unrecognized trailing lookup on an aggregate alias as 'exact', so
        # the assertion below has always been about *exactly* 2 authors
        # ("Python Web Development with Django", with 3 authors, is absent).
        # Spell the intended lookup explicitly.
        books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
        self.assertQuerysetEqual(
            books, [
                "The Definitive Guide to Django: Web Development Done Right",
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )

        authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
        self.assertQuerysetEqual(
            authors, [
                "Brad Dayley",
            ],
            lambda a: a.name
        )

        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
                "Prentice Hall",
            ],
            lambda p: p.name
        )

        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
        self.assertQuerysetEqual(
            publishers, [
                "Apress",
            ],
            lambda p: p.name
        )

        books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
        self.assertQuerysetEqual(
            books, [
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name
        )

    def test_more_aggregation(self):
        """Aggregating over a filtered, annotated queryset."""
        a = Author.objects.get(name__contains='Norvig')
        b = Book.objects.get(name__contains='Done Right')
        b.authors.add(a)
        b.save()

        vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
        self.assertEqual(vals, {"rating__avg": 4.25})

    def test_even_more_aggregate(self):
        """exclude() on annotations, ordering by them, and date/time aggregates."""
        publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
        self.assertEqual(
            list(publishers), [
                {
                    'earliest_book': datetime.date(1991, 10, 15),
                    'num_awards': 9,
                    'id': 4,
                    'name': 'Morgan Kaufmann'
                },
                {
                    'earliest_book': datetime.date(1995, 1, 15),
                    'num_awards': 7,
                    'id': 3,
                    'name': 'Prentice Hall'
                },
                {
                    'earliest_book': datetime.date(2007, 12, 6),
                    'num_awards': 3,
                    'id': 1,
                    'name': 'Apress'
                },
                {
                    'earliest_book': datetime.date(2008, 3, 3),
                    'num_awards': 1,
                    'id': 2,
                    'name': 'Sams'
                }
            ]
        )

        vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            }
        )

    def test_annotate_values_list(self):
        """Interaction of annotate() with values_list(), including flat=True."""
        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
        self.assertEqual(
            list(books), [
                (1, "159059725", 34.5),
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
        self.assertEqual(
            list(books), [
                ('159059725',)
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
        self.assertEqual(
            list(books), [
                (34.5,)
            ]
        )

        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
        self.assertEqual(list(books), [34.5])

        books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
        self.assertEqual(
            list(books), [
                (Decimal("29.69"), 2),
                (Decimal('23.09'), 1),
                (Decimal('30'), 1),
                (Decimal('75'), 1),
                (Decimal('82.8'), 1),
            ]
        )

    def test_dates_with_aggregation(self):
        """
        Test that .dates() returns a distinct set of dates when applied to a
        QuerySet with aggregation.

        Refs #18056. Previously, .dates() would return distinct (date_kind,
        aggregation) sets, in this case (year, num_authors), so 2008 would be
        returned twice because there are books from 2008 with a different
        number of authors.
        """
        dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
        self.assertQuerysetEqual(
            dates, [
                "datetime.date(1991, 1, 1)",
                "datetime.date(1995, 1, 1)",
                "datetime.date(2007, 1, 1)",
                "datetime.date(2008, 1, 1)"
            ]
        )
| apache-2.0 |
penyatree/furl | furl/furl.py | 4 | 52701 | # -*- coding: utf-8 -*-
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
import re
import abc
import warnings
from posixpath import normpath
import six
from six.moves import urllib
from six.moves.urllib.parse import quote, unquote, quote_plus
from .omdict1D import omdict1D
from .compat import basestring, UnicodeMixin
_absent = object()
#
# TODO(grun): Subclass Path, PathCompositionInterface, Query, and
# QueryCompositionInterface into two subclasses each - one for the URL
# and one for the Fragment.
#
# Subclasses will clean up the code because the valid encodings are
# different between a URL Path and a Fragment Path and a URL Query and a
# Fragment Query.
#
# For example, '?' and '#' don't need to be encoded in Fragment Path
# segments but must be encoded in URL Path segments.
#
# Similarly, '#' doesn't need to be encoded in Fragment Query keys and
# values, but must be encoded in URL Query keys and values.
#
# Map of various URL schemes to their default ports. Scheme strings are
# lowercase.
DEFAULT_PORTS = {
'ftp': 21,
'ssh': 22,
'http': 80,
'https': 443,
}
# List of schemes that don't require two slashes after the colon. For example,
# 'mailto:user@google.com' instead of 'mailto://user@google.com'. Scheme
# strings are lowercase.
#
# TODO(grun): Support schemes separated by just ':', not '://' without having
# an explicit list. There are many such schemes in various URIs.
COLON_SEPARATED_SCHEMES = [
'sms',
'tel',
'mailto',
]
def non_text_iterable(value):
    """Return True if <value> is iterable but isn't a text string."""
    is_iterable = callable_attr(value, '__iter__')
    return is_iterable and not isinstance(value, basestring)
class Path(object):

    """
    Represents a path comprised of zero or more path segments.

      http://tools.ietf.org/html/rfc3986#section-3.3

    Path parameters aren't supported.

    Attributes:
      _force_absolute: Function whos boolean return value specifies
        whether self.isabsolute should be forced to True or not. If
        _force_absolute(self) returns True, isabsolute is read only and
        raises an AttributeError if assigned to. If
        _force_absolute(self) returns False, isabsolute is mutable and
        can be set to True or False. URL paths use _force_absolute and
        return True if the netloc is non-empty (not equal to
        ''). Fragment paths are never read-only and their
        _force_absolute(self) always returns False.
      segments: List of zero or more path segments comprising this
        path. If the path string has a trailing '/', the last segment
        will be '' and self.isdir will be True and self.isfile will be
        False. An empty segment list represents an empty path, not '/'
        (though they have the same meaning).
      isabsolute: Boolean whether or not this is an absolute path or
        not. An absolute path starts with a '/'. self.isabsolute is
        False if the path is empty (self.segments == [] and str(path) ==
        '').
      strict: Boolean whether or not UserWarnings should be raised if
        improperly encoded path strings are provided to methods that
        take such strings, like load(), add(), set(), remove(), etc.
    """

    # Characters that don't need percent-encoding within a path segment.
    SAFE_SEGMENT_CHARS = ":@-._~!$&'()*+,;="

    def __init__(self, path='', force_absolute=lambda _: False, strict=False):
        # See the class docstring for the meaning of each attribute.
        self.segments = []

        self.strict = strict
        self._isabsolute = False
        self._force_absolute = force_absolute
        self.load(path)

    def load(self, path):
        """
        Load <path>, replacing any existing path. <path> can either be a
        list of segments or a path string to adopt.

        Returns: <self>.
        """
        if not path:
            segments = []
        elif callable_attr(path, 'split'):  # String interface.
            segments = self._segments_from_path(path)
        else:  # List interface.
            segments = path

        if self._force_absolute(self):
            self._isabsolute = True if segments else False
        else:
            # A leading '' segment is how an absolute path ('/a/b' ->
            # ['', 'a', 'b']) is detected from a segment list.
            self._isabsolute = (segments and segments[0] == '')

        if self.isabsolute and len(segments) > 1 and segments[0] == '':
            # Drop the leading '' marker; absoluteness is now tracked by
            # self._isabsolute instead of a sentinel segment.
            segments.pop(0)

        # Store segments URL-decoded.
        self.segments = [unquote(segment) for segment in segments]

        return self

    def add(self, path):
        """
        Add <path> to the existing path. <path> can either be a list of
        segments or a path string to append to the existing path.

        Returns: <self>.
        """
        newsegments = path  # List interface.
        if callable_attr(path, 'split'):  # String interface.
            newsegments = self._segments_from_path(path)

        # Preserve the opening '/' if one exists already (self.segments
        # == ['']).
        if self.segments == [''] and newsegments and newsegments[0] != '':
            newsegments.insert(0, '')

        segments = self.segments
        if self.isabsolute and self.segments and self.segments[0] != '':
            # Re-introduce the leading '' marker so join_path_segments()
            # sees the absolute prefix.
            segments.insert(0, '')

        self.load(join_path_segments(segments, newsegments))

        return self

    def set(self, path):
        """Adopt <path>, replacing the existing path. Returns: <self>."""
        self.load(path)
        return self

    def remove(self, path):
        """Remove <path> (string, segment list, or True for everything)
        from the end of this path. Returns: <self>."""
        if path is True:
            self.load('')
        else:
            segments = path  # List interface.
            if isinstance(path, six.string_types):  # String interface.
                segments = self._segments_from_path(path)
            base = ([''] if self.isabsolute else []) + self.segments
            self.load(remove_path_segments(base, segments))

        return self

    def normalize(self):
        """
        Normalize the path. Turn '//a/./b/../c//' into '/a/c/'.

        Returns: <self>.
        """
        if str(self):
            # normpath() drops a trailing slash; re-append it for dirs.
            normalized = normpath(str(self)) + ('/' * self.isdir)
            if normalized.startswith('//'):  # http://bugs.python.org/636648
                normalized = '/' + normalized.lstrip('/')
            self.load(normalized)

        return self

    @property
    def isabsolute(self):
        if self._force_absolute(self):
            return True
        return self._isabsolute

    @isabsolute.setter
    def isabsolute(self, isabsolute):
        """
        Raises: AttributeError if _force_absolute(self) returns True.
        """
        if self._force_absolute(self):
            s = ('Path.isabsolute is True and read-only for URLs with a netloc'
                 ' (a username, password, host, and/or port). A URL path must '
                 "start with a '/' to separate itself from a netloc.")
            raise AttributeError(s)
        self._isabsolute = isabsolute

    @property
    def isdir(self):
        """
        Returns: True if the path ends on a directory, False
        otherwise. If True, the last segment is '', representing the
        trailing '/' of the path.
        """
        return (self.segments == [] or
                (self.segments and self.segments[-1] == ''))

    @property
    def isfile(self):
        """
        Returns: True if the path ends on a file, False otherwise. If
        True, the last segment is not '', representing some file as the
        last segment of the path.
        """
        return not self.isdir

    def __eq__(self, other):
        # Compare by serialized (encoded) form.
        return str(self) == str(other)

    def __ne__(self, other):
        return not self == other

    def __bool__(self):
        # An empty path is falsy.
        return len(self.segments) > 0
    __nonzero__ = __bool__

    def __str__(self):
        segments = list(self.segments)
        if self.isabsolute:
            if not segments:
                # ['', ''] serializes to '/' (absolute empty path).
                segments = ['', '']
            else:
                segments.insert(0, '')
        return self._path_from_segments(segments)

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))

    def _segments_from_path(self, path):
        """
        Returns: The list of path segments from the path string <path>.

        Raises: UserWarning if <path> is an improperly encoded path
        string and self.strict is True.
        """
        segments = []
        for segment in path.split('/'):
            if not is_valid_encoded_path_segment(segment):
                segment = quote(utf8(segment))
                if self.strict:
                    s = ("Improperly encoded path string received: '%s'. "
                         "Proceeding, but did you mean '%s'?" %
                         (path, self._path_from_segments(segments)))
                    warnings.warn(s, UserWarning)
            segments.append(utf8(segment))

        # In Python 3, utf8() returns Bytes objects that must be decoded
        # into strings before they can be passed to urllib.unquote(). In
        # Python 2, utf8() returns strings that can be passed directly
        # to urllib.unquote().
        segments = [
            segment.decode('utf8')
            if isinstance(segment, bytes) and not isinstance(segment, str)
            else segment for segment in segments]

        return [unquote(segment) for segment in segments]

    def _path_from_segments(self, segments):
        """
        Combine the provided path segments <segments> into a path string. Path
        segments in <segments> will be quoted.

        Returns: A path string with quoted path segments.
        """
        if '%' not in ''.join(segments):  # Don't double-encode the path.
            segments = [
                quote(utf8(attemptstr(segment)), self.SAFE_SEGMENT_CHARS)
                for segment in segments]
        return '/'.join(segments)
@six.add_metaclass(abc.ABCMeta)
class PathCompositionInterface(object):

    """
    Abstract class interface for a parent class that contains a Path.
    """

    def __init__(self, strict=False):
        """
        Params:
          force_absolute: See Path._force_absolute.

        Assignments to <self> in __init__() must be added to
        __setattr__() below.
        """
        self._path = Path(force_absolute=self._force_absolute, strict=strict)

    @property
    def path(self):
        # The composed Path object.
        return self._path

    @property
    def pathstr(self):
        """This method is deprecated. Use str(furl.path) instead."""
        s = ('furl.pathstr is deprecated. Use str(furl.path) instead. There '
             'should be one, and preferably only one, obvious way to serialize'
             ' a Path object to a string.')
        warnings.warn(s, DeprecationWarning)
        return str(self._path)

    @abc.abstractmethod
    def _force_absolute(self, path):
        """
        Subclass me.
        """
        pass

    def __setattr__(self, attr, value):
        """
        Intercept assignments to '_path' and 'path' so `obj.path = ...`
        loads into the composed Path instead of rebinding the attribute.
        Subclasses chain to this and use the return value to decide
        whether the assignment was already handled.

        Returns: True if this attribute is handled and set here, False
        otherwise.
        """
        if attr == '_path':
            self.__dict__[attr] = value
            return True
        elif attr == 'path':
            self._path.load(value)
            return True
        return False
@six.add_metaclass(abc.ABCMeta)
class URLPathCompositionInterface(PathCompositionInterface):

    """
    Abstract class interface for a parent class that contains a URL
    Path.

    A URL path's isabsolute attribute is absolute and read-only if a
    netloc is defined. A path cannot start without '/' if there's a
    netloc. For example, the URL 'http://google.coma/path' makes no
    sense. It should be 'http://google.com/a/path'.

    A URL path's isabsolute attribute is mutable if there's no
    netloc. The scheme doesn't matter. For example, the isabsolute
    attribute of the URL path in 'mailto:user@domain.com', with scheme
    'mailto' and path 'user@domain.com', is mutable because there is no
    netloc. See

      http://en.wikipedia.org/wiki/URI_scheme#Examples
    """

    def __init__(self, strict=False):
        PathCompositionInterface.__init__(self, strict=strict)

    def _force_absolute(self, path):
        # Coerce to a real boolean: previously this returned <self.netloc>
        # itself (a string) whenever <path> was truthy. Callers only use the
        # result in boolean contexts, so bool() preserves behavior while
        # honoring Path's documented "boolean return value" contract.
        return bool(path) and bool(self.netloc)
@six.add_metaclass(abc.ABCMeta)
class FragmentPathCompositionInterface(PathCompositionInterface):

    """
    Abstract class interface for a parent class that contains a Fragment
    Path.

    Unlike URL paths, fragment paths are never forced absolute: callers
    may freely set isabsolute to either True or False.
    """

    def __init__(self, strict=False):
        super(FragmentPathCompositionInterface, self).__init__(strict=strict)

    def _force_absolute(self, path):
        # Fragment paths are never read-only absolute.
        return False
class Query(object):
"""
Represents a URL query comprised of zero or more unique parameters
and their respective values.
http://tools.ietf.org/html/rfc3986#section-3.4
All interaction with Query.params is done with unquoted strings. So
f.query.params['a'] = 'a%5E'
means the intended value for 'a' is 'a%5E', not 'a^'.
Query.params is implemented as an omdict1D object - a one
dimensional ordered multivalue dictionary. This provides support for
repeated URL parameters, like 'a=1&a=2'. omdict1D is a subclass of
omdict, an ordered multivalue dictionary. Documentation for omdict
can be found here
https://github.com/gruns/orderedmultidict
The one dimensional aspect of omdict1D means that a list of values
is interpreted as multiple values, not a single value which is
itself a list of values. This is a reasonable distinction to make
because URL query parameters are one dimensional - query parameter
values cannot themselves be composed of sub-values.
So what does this mean? This means we can safely interpret
f = furl('http://www.google.com')
f.query.params['arg'] = ['one', 'two', 'three']
as three different values for 'arg': 'one', 'two', and 'three',
instead of a single value which is itself some serialization of the
python list ['one', 'two', 'three']. Thus, the result of the above
will be
f.query.allitems() == [
('arg','one'), ('arg','two'), ('arg','three')]
and not
f.query.allitems() == [('arg', ['one', 'two', 'three'])]
The latter doesn't make sense because query parameter values cannot
be composed of sub-values. So finally
str(f.query) == 'arg=one&arg=two&arg=three'
Attributes:
params: Ordered multivalue dictionary of query parameter key:value
pairs. Parameters in self.params are maintained URL decoded - 'a
b' not 'a+b'.
strict: Boolean whether or not UserWarnings should be raised if
improperly encoded query strings are provided to methods that
take such strings, like load(), add(), set(), remove(), etc.
"""
SAFE_KEY_CHARS = "/?:@-._~!$'()*,"
SAFE_VALUE_CHARS = "/?:@-._~!$'()*,="
    def __init__(self, query='', strict=False):
        # <strict>: raise UserWarnings on improperly encoded query strings.
        self.strict = strict
        # One dimensional ordered multivalue dictionary of decoded params.
        self._params = omdict1D()

        self.load(query)
    def load(self, query):
        """Load <query>, replacing any existing params. Returns: <self>."""
        self.params.load(self._items(query))
        return self
    def add(self, args):
        """Add the key:value items in <args> to the existing params.

        Repeated keys are preserved. Returns: <self>.
        """
        for param, value in self._items(args):
            self.params.add(param, value)
        return self
    def set(self, mapping):
        """
        Adopt all mappings in <mapping>, replacing any existing mappings
        with the same key. If a key has multiple values in <mapping>,
        they are all adopted.

        Examples:
          Query({1:1}).set([(1,None),(2,2)]).params.allitems()
            == [(1,None),(2,2)]
          Query({1:None,2:None}).set([(1,1),(2,2),(1,11)]).params.allitems()
            == [(1,1),(2,2),(1,11)]
          Query({1:None}).set([(1,[1,11,111])]).params.allitems()
            == [(1,1),(1,11),(1,111)]

        Returns: <self>.
        """
        self.params.updateall(mapping)
        return self
    def remove(self, query):
        """Remove params by key, by (key, value) item, or remove everything
        if <query> is True. Returns: <self>."""
        if query is True:
            self.load('')
            return self

        # Single key to remove.
        items = [query]
        # Dictionary or multivalue dictionary of items to remove.
        if callable_attr(query, 'items'):
            items = self._items(query)
        # List of keys or items to remove.
        elif non_text_iterable(query):
            items = query

        for item in items:
            if non_text_iterable(item) and len(item) == 2:
                # (key, value) pair: remove only that exact mapping.
                key, value = item
                self.params.popvalue(key, value, None)
            else:
                # Bare key: remove all values mapped to it.
                key = item
                self.params.pop(key, None)
        return self
    @property
    def params(self):
        # Ordered multivalue dict of URL-decoded parameter key:value pairs.
        return self._params

    @params.setter
    def params(self, params):
        # Replace all existing params with <params>, preserving multivalue
        # semantics (a key may map to several values).
        items = self._items(params)

        self._params.clear()
        for key, value in items:
            self._params.add(key, value)
    def encode(self, delimiter='&', quote_plus=True, delimeter=_absent):
        """
        Examples:

          Query('a=a&b=#').encode() == 'a=a&b=%23'
          Query('a=a&b=#').encode(';') == 'a=a;b=%23'
          Query('a+b=c+d').encode(quote_plus=False) == 'a%20b=c%20d'

        Until furl v0.4.6, the 'delimiter' argument was incorrectly
        spelled 'delimeter'. For backwards compatibility, accept both
        the correct 'delimiter' and the old, mispelled 'delimeter'.

        Returns: A URL encoded query string using <delimiter> as the
        delimiter separating key:value pairs. The most common and
        default delimiter is '&', but ';' can also be specified. ';' is
        W3C recommended. Parameter keys and values are encoded
        application/x-www-form-urlencoded if <quote_plus> is True,
        percent-encoded otherwise.
        """
        # Backwards-compatibility shim for the old misspelled kwarg.
        if delimeter is not _absent:
            delimiter = delimeter

        pairs = []
        sixurl = urllib.parse  # six.moves.urllib.parse
        quote_func = sixurl.quote_plus if quote_plus else sixurl.quote
        for key, value in self.params.iterallitems():
            utf8key = utf8(key, utf8(attemptstr(key)))
            utf8value = utf8(value, utf8(attemptstr(value)))
            quoted_key = quote_func(utf8key, self.SAFE_KEY_CHARS)
            quoted_value = quote_func(utf8value, self.SAFE_VALUE_CHARS)
            pair = '='.join([quoted_key, quoted_value])
            if value is None:  # Example: http://sprop.su/?param
                pair = quoted_key
            pairs.append(pair)

        return delimiter.join(pairs)
    def __eq__(self, other):
        # Compare by encoded query string form.
        return str(self) == str(other)

    def __ne__(self, other):
        return not self == other

    def __bool__(self):
        # A query with no params is falsy.
        return len(self.params) > 0
    __nonzero__ = __bool__

    def __str__(self):
        return self.encode()

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))
def _items(self, items):
    """
    Extract and return the key:value items from various
    containers. Some containers that could hold key:value items are

      - List of (key,value) tuples.
      - Dictionaries of key:value items.
      - Multivalue dictionary of key:value items, with potentially
        repeated keys.
      - Query string with encoded params and values.

    Keys and values are passed through unmodified unless they were
    passed in within an encoded query string, like
    'a=a%20a&b=b'. Keys and values passed in within an encoded query
    string are unquoted by urlparse.parse_qsl(), which uses
    urllib.unquote_plus() internally.

    Returns: List of items as (key, value) tuples. Keys and values
    are passed through unmodified unless they were passed in as part
    of an encoded query string, in which case the final keys and
    values that are returned will be unquoted.

    Raises: UserWarning if <items> is an improperly encoded query
    string and self.strict is True.
    """
    if not items:
        items = []
    # Multivalue Dictionary-like interface, with potentially
    # repeated keys. e.g. omdict([('a', 1), ('a', 2), ('b', 2)]).
    elif callable_attr(items, 'allitems'):
        items = list(items.allitems())
    elif callable_attr(items, 'iterallitems'):
        items = list(items.iterallitems())
    # Dictionary-like interface. e.g. {'a':1, 'b':2, 'c':3}.
    #
    # Bug fix: this condition previously appeared twice in a row,
    # which made the second branch (items.items()) unreachable dead
    # code. six.iteritems() already handles both Python 2
    # (iteritems()) and Python 3 (items()) dictionaries, so a single
    # branch suffices.
    elif callable_attr(items, 'items'):
        items = list(six.iteritems(items))
    # Encoded query string. e.g. 'a=1&b=2&c=3'
    elif isinstance(items, six.string_types):
        items = self._extract_items_from_querystr(items)
    # Default to list of key:value items interface. e.g. [('a','1'),
    # ('b','2')]
    else:
        items = list(items)
    return items
def _extract_items_from_querystr(self, querystr):
    """Parse an encoded query string into a list of (key, value)
    tuples, unquoting keys and values.

    A value of None (as opposed to '') marks a key that appeared
    without any '=' at all, like 'param' in 'http://host/?param'.

    Raises: UserWarning if self.strict is True and <querystr> is
    improperly encoded.
    """
    # Split on both accepted pair delimiters, '&' and ';'.
    pairstrs = [s2 for s1 in querystr.split('&') for s2 in s1.split(';')]

    if self.strict:
        # In strict mode, validate the encoding of every key and
        # value; warn (but still proceed) for each violation.
        pairs = [item.split('=', 1) for item in pairstrs]
        pairs = [(p[0], '' if len(p) == 1 else p[1]) for p in pairs]
        for key, value in pairs:
            valid_key = is_valid_encoded_query_key(key)
            valid_value = is_valid_encoded_query_value(value)
            if not valid_key or not valid_value:
                s = ("Improperly encoded query string received: '%s'. "
                     "Proceeding, but did you mean '%s'?" %
                     (querystr, urllib.parse.urlencode(pairs)))
                warnings.warn(s, UserWarning)

    items = []
    parsed_items = urllib.parse.parse_qsl(querystr, keep_blank_values=True)
    # parse_qsl() yields pairs in the same order as the raw pair
    # strings, so zip the two to detect '='-less keys.
    for (key, value), pairstr in six.moves.zip(parsed_items, pairstrs):
        # Empty value without '=', like '?sup'.
        if key == quote_plus(utf8(pairstr)):
            value = None
        items.append((key, value))
    return items
@six.add_metaclass(abc.ABCMeta)
class QueryCompositionInterface(object):
    """
    Abstract class interface for a parent class that contains a Query.
    """

    def __init__(self, strict=False):
        # The composed Query object, exposed through .query and .args.
        self._query = Query(strict=strict)

    @property
    def query(self):
        return self._query

    @property
    def querystr(self):
        """This method is deprecated. Use str(furl.query) instead."""
        s = ('furl.querystr is deprecated. Use str(furl.query) instead. There '
             'should be one, and preferably only one, obvious way to serialize'
             ' a Query object to a string.')
        warnings.warn(s, DeprecationWarning)
        return str(self._query)

    @property
    def args(self):
        """
        Shortcut method to access the query parameters, self._query.params.
        """
        return self._query.params

    def __setattr__(self, attr, value):
        """
        Intercept assignments to 'args' and 'query' and route them to
        the composed Query object.

        Returns: True if this attribute is handled and set here, False
        otherwise (the subclass is then expected to perform the actual
        attribute assignment).
        """
        if attr == 'args' or attr == 'query':
            self._query.load(value)
            return True
        return False
class Fragment(FragmentPathCompositionInterface, QueryCompositionInterface):
    """
    Represents a URL fragment, comprised internally of a Path and Query
    optionally separated by a '?' character.

      http://tools.ietf.org/html/rfc3986#section-3.5

    Attributes:
      path: Path object from FragmentPathCompositionInterface.
      query: Query object from QueryCompositionInterface.
      separator: Boolean whether or not a '?' separator should be
        included in the string representation of this fragment. When
        False, a '?' character will not separate the fragment path from
        the fragment query in the fragment string. This is useful to
        build fragments like '#!arg1=val1&arg2=val2', where no
        separating '?' is desired.
    """

    def __init__(self, fragment='', strict=False):
        FragmentPathCompositionInterface.__init__(self, strict=strict)
        QueryCompositionInterface.__init__(self, strict=strict)
        self.strict = strict
        self.separator = True

        self.load(fragment)

    def load(self, fragment):
        """Parse <fragment> into this fragment's path and query."""
        # Reset current state before parsing.
        self.path.load('')
        self.query.load('')

        toks = fragment.split('?', 1)
        if len(toks) == 0:
            # NOTE: str.split() always returns at least one element,
            # so this branch is unreachable; kept for safety.
            self._path.load('')
            self._query.load('')
        elif len(toks) == 1:
            # Does this fragment look like a path or a query? Default to
            # path.
            if '=' in fragment:  # Query example: '#woofs=dogs'.
                self._query.load(fragment)
            else:  # Path example: '#supinthisthread'.
                self._path.load(fragment)
        else:
            # Does toks[1] actually look like a query? Like 'a=a' or
            # 'a=' or '=a'?
            if '=' in toks[1]:
                self._path.load(toks[0])
                self._query.load(toks[1])
            # If toks[1] doesn't look like a query, the user probably
            # provided a fragment string like 'a?b?' that was intended
            # to be adopted as-is, not a two part fragment with path 'a'
            # and query 'b?'.
            else:
                self._path.load(fragment)

    def add(self, path=_absent, args=_absent):
        """Add <path> segments and/or query <args>. Returns <self>."""
        if path is not _absent:
            self.path.add(path)
        if args is not _absent:
            self.query.add(args)
        return self

    def set(self, path=_absent, args=_absent, separator=_absent):
        """Set the fragment's path, query args, and/or the '?'
        separator flag. Returns <self>."""
        if path is not _absent:
            self.path.load(path)
        if args is not _absent:
            self.query.load(args)
        # Only adopt <separator> if it is an actual boolean.
        if separator is True or separator is False:
            self.separator = separator
        return self

    def remove(self, fragment=_absent, path=_absent, args=_absent):
        """Remove the entire fragment (fragment=True), trailing <path>
        segments, and/or query <args>. Returns <self>."""
        if fragment is True:
            self.load('')
        if path is not _absent:
            self.path.remove(path)
        if args is not _absent:
            self.query.remove(args)
        return self

    def __eq__(self, other):
        # Compare by serialized string form.
        return str(self) == str(other)

    def __ne__(self, other):
        return not self == other

    def __setattr__(self, attr, value):
        # Give the composed path/query interfaces first crack at the
        # assignment; fall back to a normal attribute set.
        if (not PathCompositionInterface.__setattr__(self, attr, value) and
                not QueryCompositionInterface.__setattr__(self, attr, value)):
            object.__setattr__(self, attr, value)

    def __bool__(self):
        return bool(self.path) or bool(self.query)
    __nonzero__ = __bool__  # Python 2 truthiness hook.

    def __str__(self):
        path, query = str(self._path), str(self._query)

        # If there is no query or self.separator is False, decode all
        # '?' characters in the path from their percent encoded form
        # '%3F' to '?'. This allows for fragment strings containing
        # '?'s, like '#dog?machine?yes'.
        if path and (not query or not self.separator):
            path = path.replace('%3F', '?')
        if query and path:
            return path + ('?' if self.separator else '') + query
        return path + query

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))
@six.add_metaclass(abc.ABCMeta)
class FragmentCompositionInterface(object):
    """
    Abstract class interface for a parent class that contains a
    Fragment.
    """

    def __init__(self, strict=False):
        # The composed Fragment object, exposed through .fragment.
        self._fragment = Fragment(strict=strict)

    @property
    def fragment(self):
        return self._fragment

    @property
    def fragmentstr(self):
        """This method is deprecated. Use str(furl.fragment) instead."""
        s = ('furl.fragmentstr is deprecated. Use str(furl.fragment) instead. '
             'There should be one, and preferably only one, obvious way to '
             'serialize a Fragment object to a string.')
        warnings.warn(s, DeprecationWarning)
        return str(self._fragment)

    def __setattr__(self, attr, value):
        """
        Intercept assignment to 'fragment' and route it to the
        composed Fragment object.

        Returns: True if this attribute is handled and set here, False
        otherwise.
        """
        if attr == 'fragment':
            self.fragment.load(value)
            return True
        return False
class furl(URLPathCompositionInterface, QueryCompositionInterface,
           FragmentCompositionInterface, UnicodeMixin):
    """
    Object for simple parsing and manipulation of a URL and its
    components.

      scheme://username:password@host:port/path?query#fragment

    Attributes:
      DEFAULT_PORTS: Mapping of known schemes to their default port
        numbers.
      strict: Boolean whether or not UserWarnings should be raised if
        improperly encoded path, query, or fragment strings are provided
        to methods that take such strings, like load(), add(), set(),
        remove(), etc.
      username: Username string for authentication. Initially None.
      password: Password string for authentication with
        <username>. Initially None.
      scheme: URL scheme. A string ('http', 'https', '', etc) or None.
        All lowercase. Initially None.
      host: URL host (domain, IPv4 address, or IPv6 address), not
        including port. All lowercase. Initially None.
      port: Port. Valid port values are 1-65535, or None meaning no port
        specified.
      netloc: Network location. Combined host and port string. Initially
        None.
      path: Path object from URLPathCompositionInterface.
      query: Query object from QueryCompositionInterface.
      fragment: Fragment object from FragmentCompositionInterface.
    """

    def __init__(self, url='', strict=False):
        """
        Raises: ValueError on invalid url.
        """
        URLPathCompositionInterface.__init__(self, strict=strict)
        QueryCompositionInterface.__init__(self, strict=strict)
        FragmentCompositionInterface.__init__(self, strict=strict)
        self.strict = strict

        self.load(url)  # Raises ValueError on invalid url.

    def load(self, url):
        """
        Parse and load a URL.

        Raises: ValueError on invalid URL, like a malformed IPv6 address
        or invalid port.
        """
        # Reset all components before parsing.
        self._host = self._port = self._scheme = None
        self.username = self.password = self.scheme = None

        # Coerce non-string inputs (e.g. another furl object).
        if not isinstance(url, six.string_types):
            url = str(url)

        # urlsplit() raises a ValueError on malformed IPv6 addresses in
        # Python 2.7+. In Python <= 2.6, urlsplit() doesn't raise a
        # ValueError on malformed IPv6 addresses.
        tokens = urlsplit(url)

        self.netloc = tokens.netloc  # Raises ValueError in Python 2.7+.
        self.scheme = tokens.scheme
        if not self.port:
            # Fall back to the scheme's default port, if any.
            self._port = DEFAULT_PORTS.get(self.scheme)
        self.path.load(tokens.path)
        self.query.load(tokens.query)
        self.fragment.load(tokens.fragment)
        return self
@property
def scheme(self):
    return self._scheme

@scheme.setter
def scheme(self, scheme):
    # Schemes are case-insensitive; normalize to lowercase when the
    # value is string-like.
    if callable_attr(scheme, 'lower'):
        scheme = scheme.lower()
    self._scheme = scheme

@property
def host(self):
    return self._host

@host.setter
def host(self, host):
    """
    Raises: ValueError on malformed IPv6 address.
    """
    # Validate by round-tripping through urlsplit().
    urllib.parse.urlsplit('http://%s/' % host)  # Raises ValueError.
    self._host = host

@property
def port(self):
    return self._port

@port.setter
def port(self, port):
    """
    A port value can be 1-65535 or None meaning no port specified. If
    <port> is None and self.scheme is a known scheme in DEFAULT_PORTS,
    the default port value from DEFAULT_PORTS will be used.

    Raises: ValueError on invalid port.
    """
    if port is None:
        self._port = DEFAULT_PORTS.get(self.scheme)
    elif is_valid_port(port):
        self._port = int(str(port))
    else:
        raise ValueError("Invalid port: '%s'" % port)

@property
def netloc(self):
    # Assemble '[username[:password]@]host[:port]'. The port is
    # omitted when it equals the scheme's default port.
    userpass = self.username or ''
    if self.password is not None:
        userpass += ':' + self.password
    if userpass or self.username is not None:
        userpass += '@'

    netloc = self.host or ''
    if self.port and self.port != DEFAULT_PORTS.get(self.scheme):
        netloc += ':' + str(self.port)

    netloc = ((userpass or '') + (netloc or ''))
    # Return '' (not None) when the host was explicitly empty.
    return netloc if (netloc or self.host == '') else None
@netloc.setter
def netloc(self, netloc):
    """
    Parse <netloc> into username, password, host, and port.

    Params:
      netloc: Network location string, like 'google.com' or
        'google.com:99'.
    Raises: ValueError on invalid port or malformed IPv6 address.
    """
    # Raises ValueError on malformed IPv6 addresses.
    urllib.parse.urlsplit('http://%s/' % netloc)

    username = password = host = port = None

    if '@' in netloc:
        userpass, netloc = netloc.split('@', 1)
        if ':' in userpass:
            username, password = userpass.split(':', 1)
        else:
            username = userpass

    if ':' in netloc:
        # IPv6 address literal, e.g. '[::1]:8000'.
        if ']' in netloc:
            colonpos, bracketpos = netloc.rfind(':'), netloc.rfind(']')
            if colonpos > bracketpos and colonpos != bracketpos + 1:
                # A colon after the closing bracket that is not
                # immediately adjacent cannot be a port separator.
                raise ValueError("Invalid netloc: '%s'" % netloc)
            elif colonpos > bracketpos and colonpos == bracketpos + 1:
                host, port = netloc.rsplit(':', 1)
            else:
                host = netloc.lower()
        else:
            host, port = netloc.rsplit(':', 1)
            host = host.lower()
    else:
        host = netloc.lower()

    # Avoid side effects by assigning self.port before self.host so
    # that if an exception is raised when assigning self.port,
    # self.host isn't updated.
    self.port = port  # Raises ValueError on invalid port.
    self.host = host or None
    self.username = username or None
    self.password = password or None
@property
def url(self):
    """The full URL string assembled from this furl's components."""
    return self.tostr()

@url.setter
def url(self, url):
    """Replace this furl's contents by parsing <url>.

    Bug fix: this setter previously called self._parse(url), a method
    that is not defined anywhere on this class or in this module, so
    any assignment to .url raised AttributeError. Delegate to load(),
    which performs the actual parsing.

    Raises: ValueError on invalid url.
    """
    return self.load(url)
def add(self, args=_absent, path=_absent, fragment_path=_absent,
        fragment_args=_absent, query_params=_absent):
    """
    Add components to a URL and return this furl instance, <self>.

    If both <args> and <query_params> are provided, a UserWarning is
    raised because <args> is provided as a shortcut for
    <query_params>, not to be used simultaneously with
    <query_params>. Nonetheless, providing both <args> and
    <query_params> behaves as expected, with query keys and values
    from both <args> and <query_params> added to the query - <args>
    first, then <query_params>.

    Parameters:
      args: Shortcut for <query_params>.
      path: A list of path segments to add to the existing path
        segments, or a path string to join with the existing path
        string.
      query_params: A dictionary of query keys and values or list of
        key:value items to add to the query.
      fragment_path: A list of path segments to add to the existing
        fragment path segments, or a path string to join with the
        existing fragment path string.
      fragment_args: A dictionary of query keys and values or list
        of key:value items to add to the fragment's query.
    Returns: <self>.
    Raises: UserWarning if redundant and possibly conflicting <args> and
    <query_params> were provided.
    """
    if args is not _absent and query_params is not _absent:
        # Warn on the redundant pair, but still honor both below.
        s = ('Both <args> and <query_params> provided to furl.add(). '
             '<args> is a shortcut for <query_params>, not to be used '
             'with <query_params>. See furl.add() documentation for more '
             'details.')
        warnings.warn(s, UserWarning)

    if path is not _absent:
        self.path.add(path)
    if args is not _absent:
        self.query.add(args)
    if query_params is not _absent:
        self.query.add(query_params)
    if fragment_path is not _absent or fragment_args is not _absent:
        self.fragment.add(path=fragment_path, args=fragment_args)
    return self
def set(self, args=_absent, path=_absent, fragment=_absent, scheme=_absent,
        netloc=_absent, fragment_path=_absent, fragment_args=_absent,
        fragment_separator=_absent, host=_absent, port=_absent,
        query=_absent, query_params=_absent, username=_absent,
        password=_absent):
    """
    Set components of a url and return this furl instance, <self>.

    If any overlapping, and hence possibly conflicting, parameters
    are provided, appropriate UserWarning's will be raised. The
    groups of parameters that could potentially overlap are

      <netloc> and (<host> or <port>)
      <fragment> and (<fragment_path> and/or <fragment_args>)
      any two or all of <query>, <args>, and/or <query_params>

    In all of the above groups, the latter parameter(s) take
    precedence over the earlier parameter(s). So, for example

      furl('http://google.com/').set(
          netloc='yahoo.com:99', host='bing.com', port=40)

    will result in a UserWarning being raised and the url becoming

      'http://bing.com:40/'

    not

      'http://yahoo.com:99/'

    Parameters:
      args: Shortcut for <query_params>.
      path: A list of path segments or a path string to adopt.
      fragment: Fragment string to adopt.
      scheme: Scheme string to adopt.
      netloc: Network location string to adopt.
      query: Query string to adopt.
      query_params: A dictionary of query keys and values or list of
        key:value items to adopt.
      fragment_path: A list of path segments to adopt for the
        fragment's path or a path string to adopt as the fragment's
        path.
      fragment_args: A dictionary of query keys and values or list
        of key:value items for the fragment's query to adopt.
      fragment_separator: Boolean whether or not there should be a
        '?' separator between the fragment path and fragment query.
      host: Host string to adopt.
      port: Port number to adopt.
      username: Username string to adopt.
      password: Password string to adopt.
    Raises:
      ValueError on invalid port.
      UserWarning if <netloc> and (<host> and/or <port>) are
        provided.
      UserWarning if <query>, <args>, and/or <query_params> are
        provided.
      UserWarning if <fragment> and (<fragment_path>,
        <fragment_args>, and/or <fragment_separator>) are provided.
    Returns: <self>.
    """
    netloc_present = netloc is not _absent
    if (netloc_present and (host is not _absent or port is not _absent)):
        s = ('Possible parameter overlap: <netloc> and <host> and/or '
             '<port> provided. See furl.set() documentation for more '
             'details.')
        warnings.warn(s, UserWarning)
    if ((args is not _absent and query is not _absent) or
            (query is not _absent and query_params is not _absent) or
            (args is not _absent and query_params is not _absent)):
        s = ('Possible parameter overlap: <query>, <args>, and/or '
             '<query_params> provided. See furl.set() documentation for '
             'more details.')
        warnings.warn(s, UserWarning)
    if (fragment is not _absent and
            (fragment_path is not _absent or fragment_args is not _absent or
             (fragment_separator is not _absent))):
        s = ('Possible parameter overlap: <fragment> and '
             '(<fragment_path>and/or <fragment_args>) or <fragment> '
             'and <fragment_separator> provided. See furl.set() '
             'documentation for more details.')
        warnings.warn(s, UserWarning)

    # Avoid side effects if exceptions are raised: restore the old
    # netloc/port if either assignment below fails.
    oldnetloc, oldport = self.netloc, self.port
    try:
        if netloc is not _absent:
            # Raises ValueError on invalid port or malformed IP.
            self.netloc = netloc
        if port is not _absent:
            self.port = port  # Raises ValueError on invalid port.
    except ValueError:
        self.netloc, self.port = oldnetloc, oldport
        raise

    # Later parameters deliberately overwrite earlier overlapping
    # ones (e.g. <host> after <netloc>, <query_params> after <args>).
    if username is not _absent:
        self.username = username
    if password is not _absent:
        self.password = password
    if scheme is not _absent:
        self.scheme = scheme
    if host is not _absent:
        self.host = host

    if path is not _absent:
        self.path.load(path)
    if query is not _absent:
        self.query.load(query)
    if args is not _absent:
        self.query.load(args)
    if query_params is not _absent:
        self.query.load(query_params)
    if fragment is not _absent:
        self.fragment.load(fragment)
    if fragment_path is not _absent:
        self.fragment.path.load(fragment_path)
    if fragment_args is not _absent:
        self.fragment.query.load(fragment_args)
    if fragment_separator is not _absent:
        self.fragment.separator = fragment_separator
    return self
def remove(self, args=_absent, path=_absent, fragment=_absent,
           query=_absent, query_params=_absent, port=False,
           fragment_path=_absent, fragment_args=_absent, username=False,
           password=False):
    """
    Remove components of this furl's URL and return this furl
    instance, <self>.

    Parameters:
      args: Shortcut for query_params.
      path: A list of path segments to remove from the end of the
        existing path segments list, or a path string to remove from
        the end of the existing path string, or True to remove the
        path entirely.
      query: If True, remove the query portion of the URL entirely.
      query_params: A list of query keys to remove from the query,
        if they exist.
      port: If True, remove the port from the network location
        string, if it exists.
      fragment: If True, remove the fragment portion of the URL
        entirely.
      fragment_path: A list of path segments to remove from the end
        of the fragment's path segments or a path string to remove
        from the end of the fragment's path string.
      fragment_args: A list of query keys to remove from the
        fragment's query, if they exist.
      username: If True, remove the username, if it exists.
      password: If True, remove the password, if it exists.
    Returns: <self>.
    """
    # Flags compared with 'is True' so that only an explicit True
    # (not merely a truthy value) triggers removal.
    if port is True:
        self.port = None
    if username is True:
        self.username = None
    if password is True:
        self.password = None
    if path is not _absent:
        self.path.remove(path)
    if args is not _absent:
        self.query.remove(args)
    if query is not _absent:
        self.query.remove(query)
    if fragment is not _absent:
        self.fragment.remove(fragment)
    if query_params is not _absent:
        self.query.remove(query_params)
    if fragment_path is not _absent:
        self.fragment.path.remove(fragment_path)
    if fragment_args is not _absent:
        self.fragment.query.remove(fragment_args)
    return self
def tostr(self, query_delimiter='&', query_quote_plus=True):
    """Serialize this furl to a URL string.

    <query_delimiter> and <query_quote_plus> are forwarded to
    Query.encode().
    """
    url = urllib.parse.urlunsplit((
        self.scheme or '',  # Must be text type in Python 3.
        self.netloc,
        str(self.path),
        self.query.encode(query_delimiter, query_quote_plus),
        str(self.fragment),
    ))

    # Special cases.
    if self.scheme is None:
        # No scheme at all: strip any separator urlunsplit() added.
        if url.startswith('//'):
            url = url[2:]
        elif url.startswith('://'):
            url = url[3:]
    elif self.scheme in COLON_SEPARATED_SCHEMES:
        # Change a '://' separator to ':'. Leave a ':' separator as-is.
        url = _set_scheme(url, self.scheme)
    elif (self.scheme is not None and
          (url == '' or  # Protocol relative URL.
           (url == '%s:' % self.scheme and not str(self.path)))):
        # Restore the '//' separator that urlunsplit() dropped.
        url += '//'

    return url

def join(self, url):
    """Join this URL with <url> (relative-reference resolution) and
    load the result. Returns <self>."""
    self.load(urljoin(self.url, str(url)))
    return self

def copy(self):
    """Return a new, independent furl constructed from this one's
    current URL string."""
    return self.__class__(self)
def __eq__(self, other):
    """Compare by URL string against any object with a .url attribute."""
    try:
        return self.url == other.url
    except AttributeError:
        # Bug fix: this previously returned None, so comparisons
        # against non-furl objects evaluated to None instead of a
        # bool. Returning NotImplemented lets Python fall back to the
        # reflected comparison (and ultimately identity), yielding a
        # proper boolean result.
        return NotImplemented

def __ne__(self, other):
    return not self == other

def __setattr__(self, attr, value):
    # Give each composition interface a chance to intercept the
    # assignment (e.g. furl.path = ..., furl.query = ...,
    # furl.fragment = ...); fall back to normal attribute assignment.
    if (not PathCompositionInterface.__setattr__(self, attr, value) and
            not QueryCompositionInterface.__setattr__(self, attr, value) and
            not FragmentCompositionInterface.__setattr__(self, attr, value)):
        object.__setattr__(self, attr, value)

def __unicode__(self):
    return self.tostr()

def __repr__(self):
    return "%s('%s')" % (self.__class__.__name__, str(self))
def _get_scheme(url):
    """Return the scheme of <url>: '' for a protocol-relative URL,
    None when no scheme is present, otherwise the scheme string."""
    # Protocol relative URL, e.g. '//host/path'.
    if url.lstrip().startswith('//'):
        return ''
    colon = url.find(':')
    candidate = url[:max(0, colon)]
    if candidate in COLON_SEPARATED_SCHEMES:
        return candidate
    sep = url.find('://')
    return url[:max(0, sep)] or None
def _set_scheme(url, newscheme):
    """Return <url> with its scheme replaced by <newscheme>, using the
    appropriate ':' or '://' separator for the new scheme."""
    scheme = _get_scheme(url)
    newscheme = newscheme or ''
    if newscheme in COLON_SEPARATED_SCHEMES:
        newseparator = ':'
    else:
        newseparator = '://'

    if scheme == '':  # Protocol relative URL.
        return '%s:%s' % (newscheme, url)
    if scheme is None and url:  # No scheme present.
        return newscheme + newseparator + url
    if scheme:  # Existing scheme: strip it and its separator.
        remainder = url[len(scheme):]
        if remainder.startswith('://'):
            remainder = remainder[3:]
        elif remainder.startswith(':'):
            remainder = remainder[1:]
        return newscheme + newseparator + remainder
    return url
def urlsplit(url):
    """
    Split <url> into its component tokens, preserving non-standard
    schemes.

    Parameters:
      url: URL string to split.
    Returns: urlparse.SplitResult tuple subclass, just like
    urlparse.urlsplit() returns, with fields (scheme, netloc, path,
    query, fragment, username, password, hostname, port). See the url
    below for more details on urlsplit().

      http://docs.python.org/library/urlparse.html#urlparse.urlsplit
    """
    original_scheme = _get_scheme(url)

    def _change_urltoks_scheme(tup, scheme):
        # Rebuild the split-result tuple with its scheme replaced.
        l = list(tup)
        l[0] = scheme
        return tuple(l)

    # urlsplit() only parses the query for schemes in urlparse.uses_query,
    # so switch to 'http' (a scheme in urlparse.uses_query) for
    # urlparse.urlsplit() and switch back afterwards.
    if original_scheme is not None:
        url = _set_scheme(url, 'http')

    toks = urllib.parse.urlsplit(url)
    toks_orig_scheme = _change_urltoks_scheme(toks, original_scheme)
    return urllib.parse.SplitResult(*toks_orig_scheme)
def urljoin(base, url):
    """
    Join <base> and <url>, preserving <base>'s (possibly
    non-standard) scheme.

    Parameters:
      base: Base URL to join with <url>.
      url: Relative or absolute URL to join with <base>.
    Returns: The resultant URL from joining <base> and <url>.
    """
    base_scheme = urlsplit(base).scheme
    url_scheme = urlsplit(url).scheme

    # urllib's urljoin() only understands standard schemes, so join
    # under 'http' and restore the base's scheme afterwards.
    http_base = _set_scheme(base, 'http')
    result = urllib.parse.urljoin(http_base, url)
    if not url_scheme:
        result = _set_scheme(result, base_scheme)
    return result
def join_path_segments(*args):
    """
    Join multiple lists of path segments together, intelligently
    handling path segments borders to preserve intended slashes of the
    final constructed path.

    This function is not encoding aware. It doesn't test for, or change,
    the encoding of path segments it is passed.

    Examples:
      join_path_segments(['a'], ['b']) == ['a','b']
      join_path_segments(['a',''], ['b']) == ['a','b']
      join_path_segments(['a'], ['','b']) == ['a','b']
      join_path_segments(['a',''], ['','b']) == ['a','','b']
      join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']

    Returns: A list containing the joined path segments.
    """
    joined = []
    for segments in args:
        # Empty contributions (no segments, or just ['']) are skipped.
        if not segments or segments == ['']:
            continue
        if not joined:
            joined.extend(segments)
            continue

        last_is_empty = joined[-1] == ''
        first_is_empty = segments[0] == ''
        if last_is_empty and (not first_is_empty or len(segments) > 1):
            # Example #1: ['a',''] + ['b'] == ['a','b']
            # Example #2: ['a',''] + ['','b'] == ['a','','b']
            joined.pop(-1)
        elif not last_is_empty and first_is_empty and len(segments) > 1:
            # Example: ['a'] + ['','b'] == ['a','b']
            segments = segments[1:]
        joined.extend(segments)
    return joined
def remove_path_segments(segments, remove):
    """
    Remove the path segments of <remove> from the end of the path
    segments <segments>.

    Examples:
      # '/a/b/c' - 'b/c' == '/a/'
      remove_path_segments(['','a','b','c'], ['b','c']) == ['','a','']
      # '/a/b/c' - '/b/c' == '/a'
      remove_path_segments(['','a','b','c'], ['','b','c']) == ['','a']

    Returns: The list of all remaining path segments after the segments
    in <remove> have been removed from the end of <segments>. If no
    segments from <remove> were removed from <segments>, <segments> is
    returned unmodified.

    Bug fix: this function previously appended to the caller's
    <segments> and <remove> lists in place when either was ['']; it
    now normalizes local copies and never mutates its arguments.
    """
    # [''] means a '/', which is canonically represented by ['', ''].
    # Normalize on local copies so the caller's lists are untouched.
    if segments == ['']:
        segments = ['', '']
    if remove == ['']:
        remove = ['', '']

    if remove == segments:
        return []
    if len(remove) > len(segments):
        # <remove> can't possibly be a suffix of <segments>.
        return segments

    toremove = list(remove)
    # A leading '' in <remove> means "absolute": it anchors at a '/'
    # boundary and is not itself matched against a trailing segment.
    if len(remove) > 1 and remove[0] == '':
        toremove.pop(0)
    if toremove and toremove == segments[-1 * len(toremove):]:
        ret = segments[:len(segments) - len(toremove)]
        # A relative <remove> leaves a trailing slash behind.
        if remove[0] != '' and ret:
            ret.append('')
        return ret
    return segments
def attemptstr(o):
    """Attempt to convert <o> to a string; return <o> unchanged if the
    conversion fails.

    Bug fix: a bare 'except:' also swallowed SystemExit and
    KeyboardInterrupt; catch only Exception.
    """
    try:
        return str(o)
    except Exception:
        return o
def utf8(o, default=_absent):
    """Attempt to encode <o> as UTF-8 bytes. On failure (e.g. <o> has
    no .encode(), or encoding raises), return <o> itself, or <default>
    if one was supplied.

    Bug fix: a bare 'except:' also swallowed SystemExit and
    KeyboardInterrupt; catch only Exception.
    """
    try:
        return o.encode('utf8')
    except Exception:
        return o if default is _absent else default
def is_valid_port(port):
    """Return True if <port> (int or string) represents a valid port
    number, i.e. an integer in 1-65535."""
    as_text = str(port)
    if not as_text.isdigit():
        return False
    number = int(as_text)
    return 0 < number <= 65535
def callable_attr(obj, attr):
    """Return True if <obj> has an attribute named <attr> and that
    attribute is callable."""
    return callable(getattr(obj, attr, None))
#
# TODO(grun): These regex functions need to be expanded to reflect the
# fact that the valid encoding for a URL Path segment is different from
# a Fragment Path segment, and valid URL Query key and value encoding
# is different than valid Fragment Query key and value encoding.
#
# For example, '?' and '#' don't need to be encoded in Fragment Path
# segments but they must be encoded in URL Path segments.
#
# Similarly, '#' doesn't need to be encoded in Fragment Query keys and
# values, but must be encoded in URL Query keys and values.
#
# Perhaps merge them with URLPath, FragmentPath, URLQuery, and
# FragmentQuery when those new classes are created (see the TODO
# currently at the top of the source, 02/03/2012).
#
# RFC 3986
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
#
# pct-encoded = "%" HEXDIG HEXDIG
#
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
#
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
#
# ====
# Path
# ====
# segment = *pchar
#
# =====
# Query
# =====
# query = *( pchar / "/" / "?" )
#
# Matches zero or more pchars (RFC 3986): unreserved / sub-delims /
# ':' / '@', or a '%HH' percent-encoded byte.
VALID_ENCODED_PATH_SEGMENT_REGEX = re.compile(
    r'^([\w\-\.\~\:\@\!\$\&\'\(\)\*\+\,\;\=]|(\%[\da-fA-F][\da-fA-F]))*$')


def is_valid_encoded_path_segment(segment):
    """Return True if <segment> is a properly percent-encoded URL path
    segment."""
    return VALID_ENCODED_PATH_SEGMENT_REGEX.match(segment) is not None
# Like a path segment, but additionally allows '/' and '?' (legal in
# queries per RFC 3986) while excluding '=', the key/value separator.
VALID_ENCODED_QUERY_KEY_REGEX = re.compile(
    r'^([\w\-\.\~\:\@\!\$\&\'\(\)\*\+\,\;\/\?]|(\%[\da-fA-F][\da-fA-F]))*$')


def is_valid_encoded_query_key(key):
    """Return True if <key> is a properly percent-encoded query key."""
    return VALID_ENCODED_QUERY_KEY_REGEX.match(key) is not None
# Same character set as a query key, plus '=' (legal inside values).
VALID_ENCODED_QUERY_VALUE_REGEX = re.compile(
    r'^([\w\-\.\~\:\@\!\$\&\'\(\)\*\+\,\;\/\?\=]|(\%[\da-fA-F][\da-fA-F]))*$')


def is_valid_encoded_query_value(value):
    """Return True if <value> is a properly percent-encoded query
    value."""
    return VALID_ENCODED_QUERY_VALUE_REGEX.match(value) is not None
| unlicense |
shoopio/shoop | shuup_tests/admin/test_order_detail_extensibility.py | 2 | 4561 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import pytest
from bs4 import BeautifulSoup
from django.core.urlresolvers import reverse
from shuup.admin.modules.orders.utils import OrderInformation
from shuup.admin.modules.orders.views.detail import OrderDetailView
from shuup.apps.provides import override_provides
from shuup.testing.factories import (
add_product_to_order, create_empty_order, create_product,
get_default_shop, get_default_supplier
)
from shuup.testing.utils import apply_request_middleware
@pytest.mark.django_db
@pytest.mark.parametrize("has_price", (True, False))
def test_order_detail_has_default_toolbar_action_items(rf, admin_user, has_price):
    """The order detail toolbar should show the create-payment link for
    priced orders and the set-paid button for zero-priced orders, and
    show neither when no toolbar action providers are registered."""
    shop = get_default_shop()
    supplier = get_default_supplier()
    order = _get_order(shop, supplier, has_price)
    request = apply_request_middleware(rf.get("/"), user=admin_user)
    view_func = OrderDetailView.as_view()
    create_payment_url = reverse("shuup_admin:order.create-payment", kwargs={"pk": order.pk})
    set_paid_url = reverse("shuup_admin:order.set-paid", kwargs={"pk": order.pk})
    # With the default toolbar actions registered...
    with override_provides("admin_order_toolbar_action_item", [
        "shuup.admin.modules.orders.toolbar:CreatePaymentAction",
        "shuup.admin.modules.orders.toolbar:SetPaidAction",
    ]):
        if has_price:
            assert _check_if_link_exists(view_func, request, order, create_payment_url)
        else:
            # Zero-priced orders are marked paid directly instead of
            # going through payment creation.
            assert _check_if_button_exists(view_func, request, order, set_paid_url)

    # ...and with no providers registered, the action must disappear.
    with override_provides("admin_order_toolbar_action_item", []):
        assert not _check_if_link_exists(view_func, request, order, create_payment_url)
def _check_if_button_exists(view_func, request, order, url):
    """Render the order detail view and return True if any dropdown
    button submits to <url>."""
    response = view_func(request, pk=order.pk)
    soup = BeautifulSoup(response.render().content)
    buttons = soup.find_all("button", {"class": "dropdown-item"})
    return any(button.get("formaction", "") == url for button in buttons)
def _check_if_link_exists(view_func, request, order, url):
    """Render the order detail view and return True if any dropdown
    link points at <url>."""
    response = view_func(request, pk=order.pk)
    soup = BeautifulSoup(response.render().content)
    links = soup.find_all("a", {"class": "dropdown-item"})
    return any(link.get("href", "") == url for link in links)
def _get_order(shop, supplier, has_price):
    """Create and return a saved order containing the fixture products
    from _get_product_data(); all prices are zero when has_price is
    False."""
    order = create_empty_order(shop=shop)
    order.full_clean()
    order.save()
    for product_data in _get_product_data(has_price):
        # Pop the non-product fields so the remaining dict can be
        # passed straight to create_product() as keyword arguments.
        quantity = product_data.pop("quantity")
        tax_rate = product_data.pop("tax_rate")
        product = create_product(
            sku=product_data.pop("sku"),
            shop=shop,
            supplier=supplier,
            **product_data)
        add_product_to_order(
            order, supplier, product, quantity=quantity,
            taxless_base_unit_price=product_data["default_price"], tax_rate=tax_rate)
    # Recompute totals and verification state after adding lines.
    order.cache_prices()
    order.check_all_verified()
    order.save()
    return order
def _get_product_data(has_price):
return [
{
"sku": "sku1234",
"default_price": decimal.Decimal("123" if has_price else "0"),
"quantity": decimal.Decimal("1"),
"tax_rate": decimal.Decimal("0.24"),
},
{
"sku": "sku2345",
"default_price": decimal.Decimal("15" if has_price else "0"),
"quantity": decimal.Decimal("1"),
"tax_rate": decimal.Decimal("0.24"),
}
]
class PaymentMethodName(OrderInformation):
    """Fixture provides-plugin that contributes one extra information
    row to the admin order detail view."""
    # Row heading shown in the order detail page.
    title = "Extra information row"

    @property
    def information(self):
        # Row content; asserted against in the test below.
        return "This is row data"
@pytest.mark.django_db
def test_order_detail_info_row_extend(rf, admin_user):
    """The admin_order_information provides hook should allow plugins
    to inject extra information rows into the order detail page."""
    shop = get_default_shop()
    supplier = get_default_supplier()
    order = _get_order(shop, supplier, True)
    request = apply_request_middleware(rf.get("/"), user=admin_user)
    view_func = OrderDetailView.as_view()

    # Test that we can insert extra information rows into Order detail page
    with override_provides("admin_order_information", [
        "shuup_tests.admin.test_order_detail_extensibility:PaymentMethodName",
    ]):
        response = view_func(request, pk=order.pk)
        soup = BeautifulSoup(response.render().content)
        # The plugin's row content must appear in the rendered page.
        assert soup.find_all(text="This is row data")
"""
Django settings for wason_selection_task project.

For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&-2mrf61i#&^es9!(#t4esp%!+=anipws7-@1m)toc!5-m^df!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'selectionTask',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'wason_selection_task.urls'

WSGI_APPLICATION = 'wason_selection_task.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

# Bug fix: this was 'jp-JP', which is not a valid language code --
# 'jp' is a country code, not an ISO 639-1 language identifier. The
# language code for Japanese is 'ja'.
LANGUAGE_CODE = 'ja'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "selectionTask/static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, "static")
sgzsh269/django | tests/from_db_value/tests.py | 399 | 1075 | from django.db import connection
from django.db.models import Max
from django.test import TestCase
from .models import Cash, CashModel
class FromDBValueTest(TestCase):
    """Verify that every retrieval path (get, values, values_list,
    aggregate, defer) runs values through the field's ``from_db_value``
    converter, yielding ``Cash`` instances instead of raw decimals."""
    def setUp(self):
        CashModel.objects.create(cash='12.50')
    def test_simple_load(self):
        loaded = CashModel.objects.get()
        self.assertIsInstance(loaded.cash, Cash)
    def test_values_list(self):
        flat = CashModel.objects.values_list('cash', flat=True)
        self.assertIsInstance(flat[0], Cash)
    def test_values(self):
        rows = CashModel.objects.values('cash')
        self.assertIsInstance(rows[0]['cash'], Cash)
    def test_aggregation(self):
        largest = CashModel.objects.aggregate(m=Max('cash'))['m']
        self.assertIsInstance(largest, Cash)
    def test_defer(self):
        deferred = CashModel.objects.defer('cash').get()
        self.assertIsInstance(deferred.cash, Cash)
    def test_connection(self):
        loaded = CashModel.objects.get()
        self.assertEqual(loaded.cash.vendor, connection.vendor)
| bsd-3-clause |
Diptanshu8/zulip | bots/zephyr_mirror.py | 13 | 3067 | #!/usr/bin/env python
# Copyright (C) 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import print_function
import sys
import subprocess
import os
import traceback
import signal
from .zephyr_mirror_backend import parse_args
# Parse CLI options up front: the zulip API path added below comes from
# --root-path, so it must happen before `from zulip import ...`.
(options, args) = parse_args()
sys.path[:0] = [os.path.join(options.root_path, 'api')]
from types import FrameType
from typing import Any
def die(signal, frame):
    # type: (int, FrameType) -> None
    # We actually want to exit, so run os._exit (so as not to be caught and restarted)
    os._exit(1)
# Make Ctrl-C kill this supervisor immediately instead of being swallowed
# by the restart loop below.
signal.signal(signal.SIGINT, die)
from zulip import RandomExponentialBackoff
# Command line used to (re)spawn the actual mirroring backend process; the
# supervisor's own flags are forwarded unchanged.
args = [os.path.join(options.root_path, "user_root", "zephyr_mirror_backend.py")]
args.extend(sys.argv[1:])
if options.sync_subscriptions:
    # One-shot mode: let the backend sync subscriptions, then exit.
    subprocess.call(args)
    sys.exit(0)
if options.forward_class_messages and not options.noshard:
    # Sharded class mirroring: spawn 16 backend processes, one per hex digit
    # (each backend mirrors only the classes hashing to its shard).
    sys.path.append("/home/zulip/zulip")
    if options.on_startup_command is not None:
        subprocess.call([options.on_startup_command])
    from zerver.lib.parallel import run_parallel
    print("Starting parallel zephyr class mirroring bot")
    jobs = list("0123456789abcdef")
    def run_job(shard):
        # type: (str) -> int
        # Blocks for the lifetime of the shard's backend process.
        subprocess.call(args + ["--shard=%s" % (shard,)])
        return 0
    # run_parallel yields jobs as they complete; a backend returning at all
    # means that shard's (normally never-exiting) mirror died.
    for (status, job) in run_parallel(run_job, jobs, threads=16):
        print("A mirroring shard died!")
        pass
    sys.exit(0)
# Unsharded mode: restart the backend on exit, with randomized exponential
# backoff so a crash loop doesn't spin; give up once restarts come too fast.
backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
while backoff.keep_going():
    print("Starting zephyr mirroring bot")
    try:
        subprocess.call(args)
    except Exception:
        traceback.print_exc()
    backoff.fail()
error_message = """
ERROR: The Zephyr mirroring bot is unable to continue mirroring Zephyrs.
This is often caused by failing to maintain unexpired Kerberos tickets
or AFS tokens. See https://zulip.com/zephyr for documentation on how to
maintain unexpired Kerberos tickets and AFS tokens.
"""
print(error_message)
sys.exit(1)
| apache-2.0 |
idispatch/yaml-cpp | test/gmock-1.7.0/test/gmock_test_utils.py | 769 | 3684 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
  GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
  # Fallback layout: some source trees nest gmock one directory deeper
  # relative to the bundled gtest checkout.
  GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils  # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Thin delegation to the shared gtest helper so gmock scripts need only
  # import this module.
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.
  The function will print a message and abort the program if the resulting file
  doesn't exist.
  Args:
    executable_name: name of the test binary that the test script runs.
  Returns:
    The absolute path of the test binary.
  """
  # Delegates to the gtest helper, which also handles platform-specific
  # executable suffixes and build-output directories.
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  # On Windows, os.system() hands back the exit() argument directly and
  # os.WEXITSTATUS() doesn't work.
  if os.name == 'nt':
    return exit_code
  # On Unix, os.system() returns a wait status.  Only a normal termination
  # carries an exit() value; anything else (e.g. killed by signal) is -1.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
# These are plain re-exports: gmock test scripts import everything from this
# module so they never need to know where gtest_test_utils lives.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
  """Runs the unit test."""
  # The gtest harness parses command-line flags and invokes unittest.main().
  gtest_test_utils.Main()
| mit |
damdam-s/OpenUpgrade | addons/sale_journal/__openerp__.py | 262 | 2637 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal modules allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that works by departments.
You can use journal for different purposes, some examples:
----------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible and evolves between different status:
-----------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, to validate or invoice packing.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ipylypiv/grpc | src/python/grpcio_tests/tests/unit/_rpc_test.py | 21 | 36230 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of RPCs made against gRPC Python's application-layer API."""
import itertools
import threading
import unittest
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
    """Server-side behaviors for the four RPC cardinalities.

    Every handler calls ``self._control.control()`` at fixed points so tests
    can pause or fail the server mid-RPC; the exact number and ordering of
    those calls is relied upon by the pause/fail tests -- do not reorder.
    """
    def __init__(self, control):
        # control: test_control object coordinating server-side pauses/failures.
        self._control = control
    def handle_unary_unary(self, request, servicer_context):
        # Echoes the request bytes; attaches trailing metadata when invoked
        # through a real RPC (servicer_context is None in direct test calls).
        self._control.control()
        if servicer_context is not None:
            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
            # TODO(https://github.com/grpc/grpc/issues/8483): test the values
            # returned by these methods rather than only "smoke" testing that
            # the return after having been called.
            servicer_context.is_active()
            servicer_context.time_remaining()
        return request
    def handle_unary_stream(self, request, servicer_context):
        # Yields the request STREAM_LENGTH times, one control() call per item
        # plus one final call before the trailing metadata.
        for _ in range(test_constants.STREAM_LENGTH):
            self._control.control()
            yield request
        self._control.control()
        if servicer_context is not None:
            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
    def handle_stream_unary(self, request_iterator, servicer_context):
        # Concatenates every request message into one response payload.
        if servicer_context is not None:
            servicer_context.invocation_metadata()
        self._control.control()
        response_elements = []
        for request in request_iterator:
            self._control.control()
            response_elements.append(request)
        self._control.control()
        if servicer_context is not None:
            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
        return b''.join(response_elements)
    def handle_stream_stream(self, request_iterator, servicer_context):
        # Echoes each request message back as a response message.
        self._control.control()
        if servicer_context is not None:
            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
        for request in request_iterator:
            self._control.control()
            yield request
        self._control.control()
class _MethodHandler(grpc.RpcMethodHandler):
    """Plain record implementing the grpc.RpcMethodHandler interface.

    The gRPC runtime reads the streaming flags, the optional (de)serializers,
    and exactly one non-None behavior among the four cardinality slots
    (unary_unary / unary_stream / stream_unary / stream_stream).
    """
    def __init__(self, request_streaming, response_streaming,
                 request_deserializer, response_serializer, unary_unary,
                 unary_stream, stream_unary, stream_stream):
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        self.request_deserializer = request_deserializer
        self.response_serializer = response_serializer
        self.unary_unary = unary_unary
        self.unary_stream = unary_stream
        self.stream_unary = stream_unary
        self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
    """Maps the four test method names onto _MethodHandler configurations
    backed by a single _Handler instance."""
    def __init__(self, handler):
        self._handler = handler
    def service(self, handler_call_details):
        """Return the handler registered for the requested method, or None."""
        factories = {
            _UNARY_UNARY: lambda: _MethodHandler(
                False, False, None, None,
                self._handler.handle_unary_unary, None, None, None),
            _UNARY_STREAM: lambda: _MethodHandler(
                False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE,
                None, self._handler.handle_unary_stream, None, None),
            _STREAM_UNARY: lambda: _MethodHandler(
                True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE,
                None, None, self._handler.handle_stream_unary, None),
            _STREAM_STREAM: lambda: _MethodHandler(
                True, True, None, None, None, None, None,
                self._handler.handle_stream_stream),
        }
        factory = factories.get(handler_call_details.method)
        return factory() if factory is not None else None
def _unary_unary_multi_callable(channel):
    # No custom serializers: unary-unary traffic is raw bytes both ways.
    return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
    # Uses the toy serializers so the tests exercise the (de)serialization hooks.
    return channel.unary_stream(
        _UNARY_STREAM,
        request_serializer=_SERIALIZE_REQUEST,
        response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
    # Uses the toy serializers so the tests exercise the (de)serialization hooks.
    return channel.stream_unary(
        _STREAM_UNARY,
        request_serializer=_SERIALIZE_REQUEST,
        response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
    # No custom serializers: stream-stream traffic is raw bytes both ways.
    return channel.stream_stream(_STREAM_STREAM)
class RPCTest(unittest.TestCase):
    def setUp(self):
        """Start an in-process gRPC server and open an insecure channel to it."""
        self._control = test_control.PauseFailControl()
        self._handler = _Handler(self._control)
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(self._server_pool)
        # Port 0 asks the OS for any free port; the same port is then used
        # for the client-side channel.
        port = self._server.add_insecure_port('[::]:0')
        self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
        self._server.start()
        self._channel = grpc.insecure_channel('localhost:%d' % port)
    def tearDown(self):
        """Stop the server immediately (no grace) and drain its thread pool."""
        self._server.stop(None)
        self._server_pool.shutdown(wait=True)
    def testUnrecognizedMethod(self):
        """Invoking a method no handler services must fail with UNIMPLEMENTED."""
        request = b'abc'
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._channel.unary_unary('NoSuchMethod')(request)
        self.assertEqual(grpc.StatusCode.UNIMPLEMENTED,
                         exception_context.exception.code())
    def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
        """Blocking unary-unary call returns exactly what the handler produced."""
        request = b'\x07\x08'
        # Calling the handler directly (servicer_context=None) computes the
        # expected payload without going over the wire.
        expected_response = self._handler.handle_unary_unary(request, None)
        multi_callable = _unary_unary_multi_callable(self._channel)
        response = multi_callable(
            request,
            metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
        self.assertEqual(expected_response, response)
def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request,
metadata=(('test',
'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request,
metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
request = b'\x37\x58'
expected_responses = tuple(
self._handler.handle_unary_stream(request, None))
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSuccessfulStreamRequestBlockingUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response = multi_callable(
request_iterator,
metadata=(
('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request_iterator,
metadata=(
('test',
'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulStreamRequestFutureUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
requests = tuple(b'\x77\x58'
for _ in range(test_constants.STREAM_LENGTH))
expected_responses = tuple(
self._handler.handle_stream_stream(iter(requests), None))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSequentialInvocations(self):
first_request = b'\x07\x08'
second_request = b'\x0809'
expected_first_response = self._handler.handle_unary_unary(
first_request, None)
expected_second_response = self._handler.handle_unary_unary(
second_request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
first_response = multi_callable(
first_request, metadata=(('test', 'SequentialInvocations'),))
second_response = multi_callable(
second_request, metadata=(('test', 'SequentialInvocations'),))
self.assertEqual(expected_first_response, first_response)
self.assertEqual(expected_second_response, second_response)
def testConcurrentBlockingInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = _stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = pool.submit(
multi_callable,
request_iterator,
metadata=(('test', 'ConcurrentBlockingInvocations'),))
response_futures[index] = response_future
responses = tuple(response_future.result()
for response_future in response_futures)
pool.shutdown(wait=True)
self.assertSequenceEqual(expected_responses, responses)
def testConcurrentFutureInvocations(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = _stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'ConcurrentFutureInvocations'),))
response_futures[index] = response_future
responses = tuple(response_future.result()
for response_future in response_futures)
self.assertSequenceEqual(expected_responses, responses)
def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
request = b'\x67\x68'
expected_response = self._handler.handle_unary_unary(request, None)
response_futures = [None] * test_constants.THREAD_CONCURRENCY
lock = threading.Lock()
test_is_running_cell = [True]
def wrap_future(future):
def wrap():
try:
return future.result()
except grpc.RpcError:
with lock:
if test_is_running_cell[0]:
raise
return None
return wrap
multi_callable = _unary_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
inner_response_future = multi_callable.future(
request,
metadata=(
('test',
'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
outer_response_future = pool.submit(
wrap_future(inner_response_future))
response_futures[index] = outer_response_future
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
self.assertEqual(expected_response, response_future.result())
with lock:
test_is_running_cell[0] = False
def testConsumingOneStreamResponseUnaryRequest(self):
request = b'\x57\x38'
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),))
next(response_iterator)
def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
request = b'\x57\x38'
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(
('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
requests = tuple(b'\x67\x88'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test',
'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def testConsumingTooManyStreamResponsesStreamRequest(self):
requests = tuple(b'\x67\x88'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(
('test', 'ConsumingTooManyStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH):
next(response_iterator)
for _ in range(test_constants.STREAM_LENGTH):
with self.assertRaises(StopIteration):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.OK, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testCancelledUnaryRequestUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
def testCancelledUnaryRequestStreamResponse(self):
request = b'\x07\x19'
multi_callable = _unary_stream_multi_callable(self._channel)
with self._control.pause():
response_iterator = multi_callable(
request,
metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
self._control.block_until_paused()
response_iterator.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(response_iterator)
self.assertIs(grpc.StatusCode.CANCELLED,
exception_context.exception.code())
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testCancelledStreamRequestUnaryResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
self._control.block_until_paused()
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testCancelledStreamRequestStreamResponse(self):
requests = tuple(b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.pause():
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
response_iterator.cancel()
with self.assertRaises(grpc.RpcError):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testExpiredUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(
('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
    def testExpiredUnaryRequestFutureUnaryResponse(self):
        """A future-style unary-unary call must expire with DEADLINE_EXCEEDED
        and report it consistently via code()/result()/exception()/traceback()."""
        request = b'\x07\x17'
        callback = _Callback()
        multi_callable = _unary_unary_multi_callable(self._channel)
        with self._control.pause():
            response_future = multi_callable.future(
                request,
                timeout=test_constants.SHORT_TIMEOUT,
                metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
            response_future.add_done_callback(callback)
        # callback.value() blocks until the done-callback fires with the future.
        value_passed_to_callback = callback.value()
        self.assertIs(response_future, value_passed_to_callback)
        self.assertIsNotNone(response_future.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
        self.assertIsNotNone(response_future.details())
        self.assertIsNotNone(response_future.trailing_metadata())
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      response_future.exception().code())
    def testExpiredUnaryRequestStreamResponse(self):
        """Iterating a unary-stream RPC past its deadline raises DEADLINE_EXCEEDED."""
        request = b'\x07\x19'
        multi_callable = _unary_stream_multi_callable(self._channel)
        with self._control.pause():
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_iterator = multi_callable(
                    request,
                    timeout=test_constants.SHORT_TIMEOUT,
                    metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
                next(response_iterator)
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      response_iterator.code())
    def testExpiredStreamRequestBlockingUnaryResponse(self):
        """A blocking stream-unary call must expire with DEADLINE_EXCEEDED and
        expose Call metadata on the raised error."""
        requests = tuple(b'\x07\x08'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = _stream_unary_multi_callable(self._channel)
        with self._control.pause():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable(
                    request_iterator,
                    timeout=test_constants.SHORT_TIMEOUT,
                    metadata=(
                        ('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
        self.assertIsInstance(exception_context.exception, grpc.RpcError)
        self.assertIsInstance(exception_context.exception, grpc.Call)
        self.assertIsNotNone(exception_context.exception.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsNotNone(exception_context.exception.details())
        self.assertIsNotNone(exception_context.exception.trailing_metadata())
    def testExpiredStreamRequestFutureUnaryResponse(self):
        """A future-style stream-unary call must time out on result() before the
        RPC deadline, then expire with DEADLINE_EXCEEDED once the deadline hits."""
        requests = tuple(b'\x07\x18'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        callback = _Callback()
        multi_callable = _stream_unary_multi_callable(self._channel)
        with self._control.pause():
            response_future = multi_callable.future(
                request_iterator,
                timeout=test_constants.SHORT_TIMEOUT,
                metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
            # result() with half the RPC deadline must time out locally first.
            with self.assertRaises(grpc.FutureTimeoutError):
                response_future.result(timeout=test_constants.SHORT_TIMEOUT /
                                       2.0)
            response_future.add_done_callback(callback)
        value_passed_to_callback = callback.value()
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(response_future, value_passed_to_callback)
        self.assertIsNotNone(response_future.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
        self.assertIsNotNone(response_future.details())
        self.assertIsNotNone(response_future.trailing_metadata())
    def testExpiredStreamRequestStreamResponse(self):
        """Iterating a stream-stream RPC past its deadline raises DEADLINE_EXCEEDED."""
        requests = tuple(b'\x67\x18'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = _stream_stream_multi_callable(self._channel)
        with self._control.pause():
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_iterator = multi_callable(
                    request_iterator,
                    timeout=test_constants.SHORT_TIMEOUT,
                    metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
                next(response_iterator)
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      response_iterator.code())
    def testFailedUnaryRequestBlockingUnaryResponse(self):
        """A servicer-side failure surfaces as UNKNOWN on a blocking unary call."""
        request = b'\x37\x17'
        multi_callable = _unary_unary_multi_callable(self._channel)
        # _control.fail() makes the servicer raise, which gRPC maps to UNKNOWN.
        with self._control.fail():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable.with_call(
                    request,
                    metadata=(
                        ('test', 'FailedUnaryRequestBlockingUnaryResponse'),))
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
    def testFailedUnaryRequestFutureUnaryResponse(self):
        """A servicer-side failure surfaces as UNKNOWN on a future-style unary
        call, consistently across result()/exception()/traceback()."""
        request = b'\x37\x17'
        callback = _Callback()
        multi_callable = _unary_unary_multi_callable(self._channel)
        with self._control.fail():
            response_future = multi_callable.future(
                request,
                metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
            response_future.add_done_callback(callback)
            value_passed_to_callback = callback.value()
            self.assertIsInstance(response_future, grpc.Future)
            self.assertIsInstance(response_future, grpc.Call)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
            self.assertIs(grpc.StatusCode.UNKNOWN,
                          exception_context.exception.code())
            self.assertIsInstance(response_future.exception(), grpc.RpcError)
            self.assertIsNotNone(response_future.traceback())
            self.assertIs(grpc.StatusCode.UNKNOWN,
                          response_future.exception().code())
        self.assertIs(response_future, value_passed_to_callback)
    def testFailedUnaryRequestStreamResponse(self):
        """A servicer-side failure surfaces as UNKNOWN when iterating a
        unary-stream response."""
        request = b'\x37\x17'
        multi_callable = _unary_stream_multi_callable(self._channel)
        # NOTE(review): unlike the sibling tests, assertRaises wraps the
        # fail() context here (nesting is inverted) — presumably intentional,
        # but worth confirming against the other Failed* tests.
        with self.assertRaises(grpc.RpcError) as exception_context:
            with self._control.fail():
                response_iterator = multi_callable(
                    request,
                    metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
                next(response_iterator)
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
    def testFailedStreamRequestBlockingUnaryResponse(self):
        """A servicer-side failure surfaces as UNKNOWN on a blocking
        stream-unary call."""
        requests = tuple(b'\x47\x58'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = _stream_unary_multi_callable(self._channel)
        with self._control.fail():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable(
                    request_iterator,
                    metadata=(
                        ('test', 'FailedStreamRequestBlockingUnaryResponse'),))
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
    def testFailedStreamRequestFutureUnaryResponse(self):
        """A servicer-side failure surfaces as UNKNOWN on a future-style
        stream-unary call and is delivered to the done-callback."""
        requests = tuple(b'\x07\x18'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        callback = _Callback()
        multi_callable = _stream_unary_multi_callable(self._channel)
        with self._control.fail():
            response_future = multi_callable.future(
                request_iterator,
                metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
            response_future.add_done_callback(callback)
            value_passed_to_callback = callback.value()
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(response_future, value_passed_to_callback)
    def testFailedStreamRequestStreamResponse(self):
        """A servicer-side failure surfaces as UNKNOWN when draining a
        stream-stream response iterator."""
        requests = tuple(b'\x67\x88'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = _stream_stream_multi_callable(self._channel)
        with self._control.fail():
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_iterator = multi_callable(
                    request_iterator,
                    metadata=(('test', 'FailedStreamRequestStreamResponse'),))
                # tuple() drains the iterator, forcing the failure to surface.
                tuple(response_iterator)
        self.assertIs(grpc.StatusCode.UNKNOWN,
                      exception_context.exception.code())
        self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
    def testIgnoredUnaryRequestFutureUnaryResponse(self):
        """Dropping the returned future on the floor must not raise or hang."""
        request = b'\x37\x17'
        multi_callable = _unary_unary_multi_callable(self._channel)
        multi_callable.future(
            request,
            metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
    def testIgnoredUnaryRequestStreamResponse(self):
        """Dropping the response iterator unconsumed must not raise or hang."""
        request = b'\x37\x17'
        multi_callable = _unary_stream_multi_callable(self._channel)
        multi_callable(
            request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
    def testIgnoredStreamRequestFutureUnaryResponse(self):
        """Dropping a stream-unary future on the floor must not raise or hang."""
        requests = tuple(b'\x07\x18'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = _stream_unary_multi_callable(self._channel)
        multi_callable.future(
            request_iterator,
            metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
    def testIgnoredStreamRequestStreamResponse(self):
        """Dropping a stream-stream response iterator must not raise or hang."""
        requests = tuple(b'\x67\x88'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = _stream_stream_multi_callable(self._channel)
        multi_callable(
            request_iterator,
            metadata=(('test', 'IgnoredStreamRequestStreamResponse'),))
# Allow running this test module directly with verbose unittest output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| bsd-3-clause |
evanccnyc/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 35 | 9734 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicate tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
    """Extract a human-readable message from a boto error object."""
    # boto exceptions expose either .error_message or .message depending on
    # the error class; fall back to a generic repr otherwise.
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
    """Find an existing VPC by Name tag and CIDR block.

    Returns the matching boto VPC object, or None when nothing matches.
    When several VPCs match and multi_ok was not requested, the module
    fails rather than picking one arbitrarily.
    """
    matched_vpc = None
    try:
        matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
    except Exception as e:
        module.fail_json(msg=boto_exception(e))
    if len(matching_vpcs) == 1:
        matched_vpc = matching_vpcs[0]
    elif len(matching_vpcs) > 1:
        # Bug fix: the original condition was `if multi:`, i.e. it failed
        # precisely when the user HAD passed multi_ok=True, contradicting the
        # error message below. Only fail when duplicates exist and multi_ok
        # is off.
        if not multi:
            module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                                 'CIDR block you specified. If you would like to create '
                                 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
    return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    """Ensure the VPC carries *tags* plus a 'Name' tag.

    Returns True when tags had to be (re)written, False when they already
    match. The caller's dict is never mutated — the original implementation
    injected the 'Name' key into the dict passed in, leaking it back into
    module.params.
    """
    desired = dict(tags) if tags else {}
    desired['Name'] = name
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # `cmp()` is Python-2-only; plain inequality has the same truthiness.
        if desired != current_tags:
            vpc.create_tags(vpc_obj.id, desired)
            return True
        return False
    except Exception as e:
        module.fail_json(msg=boto_exception(e))
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate *dhcp_id* with the VPC unless it is already the active set.

    Returns True when an association call was made, False otherwise.
    """
    if vpc_obj.dhcp_options_id == dhcp_id:
        return False
    connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return the VPC object's attribute dict with boto bookkeeping removed.

    Strips the keys that are not useful (or not serialisable) in module
    output. Returns None when no VPC object is given.
    """
    if vpc_obj is None:
        return None
    vpc_values = vpc_obj.__dict__
    for key in ("region", "item", "connection"):
        vpc_values.pop(key, None)
    return vpc_values
def main():
    """Entry point: create or delete a VPC according to module parameters."""
    # Start from the shared EC2 argument schema and add this module's options.
    argument_spec=ec2_argument_spec()
    argument_spec.update(dict(
            name = dict(type='str', default=None, required=True),
            cidr_block = dict(type='str', default=None, required=True),
            tenancy = dict(choices=['default', 'dedicated'], default='default'),
            dns_support = dict(type='bool', default=True),
            dns_hostnames = dict(type='bool', default=True),
            dhcp_opts_id = dict(type='str', default=None, required=False),
            tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
            state = dict(choices=['present', 'absent'], default='present'),
            multi_ok = dict(type='bool', default=False)
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')
    name=module.params.get('name')
    cidr_block=module.params.get('cidr_block')
    tenancy=module.params.get('tenancy')
    dns_support=module.params.get('dns_support')
    dns_hostnames=module.params.get('dns_hostnames')
    dhcp_id=module.params.get('dhcp_opts_id')
    tags=module.params.get('tags')
    state=module.params.get('state')
    multi=module.params.get('multi_ok')
    changed=False
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    # AWS requires DNS support to be on before DNS hostnames can be enabled.
    if dns_hostnames and not dns_support:
        module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')
    if state == 'present':
        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
        if vpc_obj is None:
            try:
                vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
        # name is required, so in practice this branch always runs and keeps
        # the 'Name' tag in sync.
        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
        # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
        # the attribute each time and is not used as a changed-factor.
        try:
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)
        # get the vpc obj again in case it has changed
        try:
            vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)
        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
    elif state == 'absent':
        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
        if vpc_obj is not None:
            try:
                connection.delete_vpc(vpc_obj.id)
                vpc_obj = None
                changed = True
            except BotoServerError, e:
                e_msg = boto_exception(e)
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                 "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
# Ansible injects these at the bottom of core modules; the star imports
# provide AnsibleModule, ec2_argument_spec, get_aws_connection_info,
# connect_to_aws and AnsibleAWSError used above.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
rmoorman/djangoproject.com | tracdb/stats.py | 8 | 1896 | """
Various queries for grabbing interesting user stats from Trac.
"""
import operator
from collections import OrderedDict
import django.db
from .models import Attachment, Revision, Ticket, TicketChange
# Registry of all stat functions; populated by the @stat decorator below.
_statfuncs = []
def stat(title):
    """
    Register a function as a "stat"

    The function should take a username and return a number.
    """
    def _decorator(func):
        func.title = title
        _statfuncs.append(func)
        return func
    return _decorator
def get_user_stats(username):
    """Evaluate every registered stat for *username*, ordered by stat title."""
    funcs = sorted(_statfuncs, key=operator.attrgetter('title'))
    return OrderedDict((func.title, func(username)) for func in funcs)
@stat('Commits')
def commit_count(username):
    """Number of Trac-recorded revisions authored by *username*."""
    revisions = Revision.objects.filter(author=username)
    return revisions.count()
@stat('Tickets closed')
def tickets_closed(username):
    """Number of distinct tickets that *username* has moved to 'closed'."""
    # Raw query so that we can do COUNT(DISTINCT ticket) — the ORM would
    # count every closing change, double-counting reopened tickets.
    sql = """SELECT COUNT(DISTINCT ticket) FROM ticket_change
           WHERE author = %s AND field = 'status' AND newvalue = 'closed';"""
    return run_single_value_query(sql, username)
@stat('Tickets opened')
def tickets_opened(username):
    """Number of tickets reported by *username*."""
    reported = Ticket.objects.filter(reporter=username)
    return reported.count()
@stat('New tickets reviewed')
def new_tickets_reviewed(username):
    """Number of triage events where *username* moved a ticket out of
    'Unreviewed'."""
    # Deliberately NOT de-duplicated (unlike tickets_closed): multiple
    # reviews of the same ticket each count as a review.
    changes = TicketChange.objects.filter(
        author=username, field='stage', oldvalue='Unreviewed')
    changes = changes.exclude(newvalue='Unreviewed')
    return changes.count()
@stat('Patches submitted')
def patches_submitted(username):
    """Number of ticket attachments (patches) uploaded by *username*."""
    attachments = Attachment.objects.filter(author=username)
    return attachments.count()
def run_single_value_query(query, *params):
    """
    Helper: run a query returning a single value (e.g. a COUNT) and return the value.
    """
    cursor = django.db.connections['trac'].cursor()
    try:
        cursor.execute(query, params)
        return cursor.fetchone()[0]
    finally:
        # Bug fix: the original never closed the cursor, leaking it on every
        # call (and on execute() failures).
        cursor.close()
| bsd-3-clause |
Jenselme/servo | tests/wpt/harness/wptrunner/config.py | 196 | 1851 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import ConfigParser
import os
import sys
from collections import OrderedDict
here = os.path.split(__file__)[0]
class ConfigDict(dict):
    """A dict for one config section whose values may be filesystem paths
    resolved relative to *base_path* (the config file's directory)."""

    def __init__(self, base_path, *args, **kwargs):
        self.base_path = base_path
        dict.__init__(self, *args, **kwargs)

    def get_path(self, key, default=None):
        """Return the value for *key* as an absolute path.

        ``~`` is expanded and relative values are resolved against
        ``base_path``. Returns *default* when *key* is absent.
        """
        if key not in self:
            return default
        path = self[key]
        # Bug fix: os.path.expanduser() returns the expanded path; the
        # original discarded its result, so '~' was never expanded.
        path = os.path.expanduser(path)
        return os.path.abspath(os.path.join(self.base_path, path))
def read(config_path):
    """Parse *config_path* into an OrderedDict mapping section name to a
    ConfigDict of its options, with '%(pwd)s' interpolation available."""
    config_path = os.path.abspath(config_path)
    config_root = os.path.dirname(config_path)
    parser = ConfigParser.SafeConfigParser()
    loaded = parser.read(config_path)
    # parser.read silently skips unreadable files; insist ours was parsed.
    assert config_path in loaded, loaded
    subns = {"pwd": os.path.abspath(os.path.curdir)}
    result = OrderedDict()
    for section in parser.sections():
        section_dict = ConfigDict(config_root)
        for key in parser.options(section):
            section_dict[key] = parser.get(section, key, False, subns)
        result[section] = section_dict
    return result
def path(argv=None):
    """Locate the wptrunner config file.

    A '--config PATH' or '--config=PATH' argument wins; otherwise a
    'wptrunner.ini' in the working directory; otherwise the bundled
    default config. Always returns an absolute path.
    """
    if argv is None:
        argv = []
    config_path = None
    for index, arg in enumerate(argv):
        if arg == "--config" and index + 1 < len(argv):
            config_path = argv[index + 1]
        elif arg.startswith("--config="):
            config_path = arg.split("=", 1)[1]
        if config_path is not None:
            break
    if config_path is None:
        if os.path.exists("wptrunner.ini"):
            config_path = os.path.abspath("wptrunner.ini")
        else:
            config_path = os.path.join(here, "..", "wptrunner.default.ini")
    return os.path.abspath(config_path)
def load():
    """Read the config file located via the process's command-line arguments."""
    config_path = path(sys.argv)
    return read(config_path)
| mpl-2.0 |
EdPassos/fofix | src/constants.py | 12 | 1739 | #####################################################################
# Frets on Fire X (FoFiX) #
# Copyright (C) 2011 FoFiX Team #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
# Horizontal alignments
LEFT = 0
CENTER = 1
RIGHT = 2
# Vertical alignments
TOP = 0
MIDDLE = 1
BOTTOM = 2
# Stretching constants (how images scale to the screen)
FIT_WIDTH = 1
FIT_HEIGHT = 2
FULL_SCREEN = 3
KEEP_ASPECT = 4
# Screen sizing scalers — reference resolution that coordinates scale from
SCREEN_WIDTH = 640.0
SCREEN_HEIGHT = 480.0
def isTrue(value):
    """Return True when *value* is one of the (lowercase) string forms that
    count as true when loading values from a file."""
    return value in ("1", "true", "yes", "on")
| gpl-2.0 |
mythmon/kitsune | kitsune/sumo/tests/test_email_utils.py | 6 | 5224 | from mock import patch
from nose.tools import eq_
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.translation import get_language
from django.utils.functional import lazy
from kitsune.sumo.email_utils import (safe_translation,
emails_with_users_and_watches)
from kitsune.sumo.utils import uselocale
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import UserFactory
# msgid -> {locale: translation} fixture consumed by mock_ugettext below.
# The French 'Hello {name}' entry deliberately uses a positional '{0}'
# placeholder so that .format(name=...) raises — the safe_translation tests
# rely on 'fr' being a broken translation.
mock_translations = {
    'Hello': {
        'en-us': 'Hello',
        'fr': 'Bonjour',
        'es': 'Hola',
    },
    'Hello {name}': {
        'en-us': 'Hello {name}',
        'fr': 'Bonjour {0}',
        'es': 'Hola {name}',
    }
}
def mock_ugettext(msg_id):
    """Return the fixture translation of *msg_id* for the active language."""
    return mock_translations[msg_id][get_language()]
# Lazy counterpart, mirroring django.utils.translation.ugettext_lazy.
mock_ugettext_lazy = lazy(mock_ugettext)
def mock_gettext(f):
    """Decorator that patches Django's ugettext/ugettext_lazy with the
    fixture-backed mocks for the duration of the decorated test."""
    # Same application order as before: ugettext first, then ugettext_lazy.
    for target, replacement in (
            ('django.utils.translation.ugettext', mock_ugettext),
            ('django.utils.translation.ugettext_lazy', mock_ugettext_lazy)):
        f = patch(target, replacement)(f)
    return f
class SafeTranslationTests(TestCase):
    """Tests for safe_translation(): good translations pass through, bad
    translations fall back to English and are logged."""
    def setUp(self):
        # These tests assume English is the fall back language. If it
        # isn't we are gonna have a bad time.
        eq_('en-US', settings.WIKI_DEFAULT_LANGUAGE)
    @mock_gettext
    def test_mocked_gettext(self):
        """I'm not entirely sure about the mocking, so test that."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import ugettext as _
        with uselocale('en-US'):
            eq_(_('Hello'), 'Hello')
        with uselocale('fr'):
            eq_(_('Hello'), 'Bonjour')
        with uselocale('es'):
            eq_(_('Hello'), 'Hola')
    @mock_gettext
    def test_safe_translation_noop(self):
        """Test that safe_translation doesn't mess with good translations."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import ugettext as _
        @safe_translation
        def simple(locale):
            return _('Hello')
        # These should just work normally.
        eq_(simple('en-US'), 'Hello')
        eq_(simple('fr'), 'Bonjour')
        eq_(simple('es'), 'Hola')
    @mock_gettext
    def test_safe_translation_bad_trans(self):
        """Test that safe_translation insulates from bad translations."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import ugettext as _
        # `safe_translation` will call this with the given locale, and
        # if that fails, fall back to English.
        # The French fixture uses a positional '{0}', so .format(name=...)
        # raises for 'fr' only.
        @safe_translation
        def bad_trans(locale):
            return _('Hello {name}').format(name='Mike')
        # French should come back as English, because it has a bad
        # translation, but Spanish should come back in Spanish.
        eq_(bad_trans('en-US'), 'Hello Mike')
        eq_(bad_trans('fr'), 'Hello Mike')
        eq_(bad_trans('es'), 'Hola Mike')
    @mock_gettext
    @patch('kitsune.sumo.email_utils.log')
    def test_safe_translation_logging(self, mocked_log):
        """Logging translation errors is really important, so test it."""
        # Import translation now so it is affected by the mock.
        from django.utils.translation import ugettext as _
        # Assert that bad translations cause error logging.
        @safe_translation
        def bad_trans(locale):
            return _('Hello {name}').format(name='Mike')
        # English and Spanish should not log anything. French should.
        bad_trans('en-US')
        bad_trans('es')
        eq_(len(mocked_log.method_calls), 0)
        bad_trans('fr')
        eq_(len(mocked_log.method_calls), 1)
        method_name, method_args, method_kwargs = mocked_log.method_calls[0]
        eq_(method_name, 'exception')
        assert 'Bad translation' in method_args[0]
        eq_(method_args[1], 'fr')
class UseLocaleTests(TestCase):
    """Tests for the uselocale() context manager."""
    def test_uselocale(self):
        """Test that uselocale does what it says on the tin."""
        # Note: Django normalises 'en-US' to lowercase 'en-us'.
        with uselocale('en-US'):
            eq_(get_language(), 'en-us')
        with uselocale('de'):
            eq_(get_language(), 'de')
        with uselocale('fr'):
            eq_(get_language(), 'fr')
class PremailerTests(TestCase):
    """Tests for the premailer pass applied to outgoing HTML email."""
    def test_styles_inlining(self):
        """Test that styles tags are converted to inline styles"""
        # Also covered implicitly: the relative href is rewritten to an
        # absolute https URL on the current site's domain.
        with patch('kitsune.sumo.email_utils.render_to_string') as mocked:
            mocked.return_value = ('<html>'
                                   '<head>'
                                   '<style>a { color: #000; }</style>'
                                   '</head>'
                                   '<body>'
                                   '<a href="/test">Hyperlink</a>'
                                   '</body>'
                                   '</html>')
            u = UserFactory()
            msg = emails_with_users_and_watches('test', 'a.ltxt', 'a.html', {}, [(u, [None])])
            for m in msg:
                tag = ('<a href="https://%s/test" style="color:#000">Hyperlink</a>')
                self.assertIn(tag % Site.objects.get_current().domain,
                              str(m.message()))
| bsd-3-clause |
WSCU/crazyflie_ros | build/lib.linux-x86_64-2.7/cfclient/utils/input.py | 4 | 12088 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Module to read input devices and send controls to the Crazyflie.
This module reads input from joysticks or other input devices and sends control
set-points to the Crazyflie. It can be configured in the UI.
Various drivers can be used to read input device data. Currently is uses the
PyGame driver, but in the future native support will be provided for Linux and
Windows drivers.
The input device's axes and buttons are mapped to software inputs using a
configuration file.
"""
__author__ = 'Bitcraze AB'
__all__ = ['JoystickReader']
import sys
import os
import re
import glob
import traceback
import logging
import shutil
logger = logging.getLogger(__name__)
from cfclient.utils.pygamereader import PyGameReader
from cfclient.utils.config import Config
from cfclient.utils.config_manager import ConfigManager
from cfclient.utils.periodictimer import PeriodicTimer
from cflib.utils.callbacks import Caller
# Maximum raw thrust value (presumably the cap for thrust set-points sent to
# the Crazyflie — confirm against p2t()/read_input, defined further down).
MAX_THRUST = 65000
class JoystickReader:
"""
Thread that will read input from devices/joysticks and send control-set
ponts to the Crazyflie
"""
inputConfig = []
def __init__(self, do_device_discovery=True):
# TODO: Should be OS dependant
self.inputdevice = PyGameReader()
self._min_thrust = 0
self._max_thrust = 0
self._thrust_slew_rate = 0
self._thrust_slew_enabled = False
self._thrust_slew_limit = 0
self._emergency_stop = False
self._has_pressure_sensor = False
self._old_thrust = 0
self._old_alt_hold = False
self._trim_roll = Config().get("trim_roll")
self._trim_pitch = Config().get("trim_pitch")
if (Config().get("flightmode") is "Normal"):
self._max_yaw_rate = Config().get("normal_max_yaw")
self._max_rp_angle = Config().get("normal_max_rp")
# Values are stored at %, so use the functions to set the values
self.set_thrust_limits(
Config().get("normal_min_thrust"),
Config().get("normal_max_thrust"))
self.set_thrust_slew_limiting(
Config().get("normal_slew_rate"),
Config().get("normal_slew_limit"))
else:
self._max_yaw_rate = Config().get("max_yaw")
self._max_rp_angle = Config().get("max_rp")
# Values are stored at %, so use the functions to set the values
self.set_thrust_limits(
Config().get("min_thrust"), Config().get("max_thrust"))
self.set_thrust_slew_limiting(
Config().get("slew_rate"), Config().get("slew_limit"))
self._dev_blacklist = None
if (len(Config().get("input_device_blacklist")) > 0):
self._dev_blacklist = re.compile(
Config().get("input_device_blacklist"))
logger.info("Using device blacklist [{}]".format(
Config().get("input_device_blacklist")))
self._available_devices = {}
# TODO: The polling interval should be set from config file
self._read_timer = PeriodicTimer(0.01, self.read_input)
if do_device_discovery:
self._discovery_timer = PeriodicTimer(1.0,
self._do_device_discovery)
self._discovery_timer.start()
# Check if user config exists, otherwise copy files
if not os.path.exists(ConfigManager().configs_dir):
logger.info("No user config found, copying dist files")
os.makedirs(ConfigManager().configs_dir)
for f in glob.glob(sys.path[0] +
"/cfclient/configs/input/[A-Za-z]*.json"):
dest = os.path.join(ConfigManager().
configs_dir, os.path.basename(f))
if not os.path.isfile(dest):
logger.debug("Copying %s", f)
shutil.copy2(f, ConfigManager().configs_dir)
ConfigManager().get_list_of_configs()
self.input_updated = Caller()
self.rp_trim_updated = Caller()
self.emergency_stop_updated = Caller()
self.device_discovery = Caller()
self.device_error = Caller()
self.althold_updated = Caller()
    def setAltHoldAvailable(self, available):
        # Remember whether the connected copter has a pressure sensor
        # (i.e. whether altitude hold can be offered at all).
        self._has_pressure_sensor = available
    def setAltHold(self, althold):
        # Cache the externally-set alt-hold state (presumably compared against
        # fresh input in read_input — confirm there).
        self._old_alt_hold = althold
def _do_device_discovery(self):
devs = self.getAvailableDevices()
if len(devs):
self.device_discovery.call(devs)
self._discovery_timer.stop()
def getAvailableDevices(self):
"""List all available and approved input devices.
This function will filter available devices by using the
blacklist configuration and only return approved devices."""
devs = self.inputdevice.getAvailableDevices()
approved_devs = []
for dev in devs:
if ((not self._dev_blacklist) or
(self._dev_blacklist and not
self._dev_blacklist.match(dev["name"]))):
self._available_devices[dev["name"]] = dev["id"]
approved_devs.append(dev)
return approved_devs
def enableRawReading(self, deviceId):
"""
Enable raw reading of the input device with id deviceId. This is used
to get raw values for setting up of input devices. Values are read
without using a mapping.
"""
self.inputdevice.enableRawReading(deviceId)
def disableRawReading(self):
"""Disable raw reading of input device."""
self.inputdevice.disableRawReading()
def readRawValues(self):
""" Read raw values from the input device."""
return self.inputdevice.readRawValues()
def start_input(self, device_name, config_name):
"""
Start reading input from the device with name device_name using config
config_name
"""
try:
device_id = self._available_devices[device_name]
self.inputdevice.start_input(
device_id,
ConfigManager().get_config(config_name))
self._read_timer.start()
except Exception:
self.device_error.call(
"Error while opening/initializing input device\n\n%s" %
(traceback.format_exc()))
def stop_input(self):
"""Stop reading from the input device."""
self._read_timer.stop()
def set_yaw_limit(self, max_yaw_rate):
"""Set a new max yaw rate value."""
self._max_yaw_rate = max_yaw_rate
def set_rp_limit(self, max_rp_angle):
"""Set a new max roll/pitch value."""
self._max_rp_angle = max_rp_angle
def set_thrust_slew_limiting(self, thrust_slew_rate, thrust_slew_limit):
"""Set new values for limit where the slewrate control kicks in and
for the slewrate."""
self._thrust_slew_rate = JoystickReader.p2t(thrust_slew_rate)
self._thrust_slew_limit = JoystickReader.p2t(thrust_slew_limit)
if (thrust_slew_rate > 0):
self._thrust_slew_enabled = True
else:
self._thrust_slew_enabled = False
def set_thrust_limits(self, min_thrust, max_thrust):
"""Set a new min/max thrust limit."""
self._min_thrust = JoystickReader.p2t(min_thrust)
self._max_thrust = JoystickReader.p2t(max_thrust)
def set_trim_roll(self, trim_roll):
"""Set a new value for the roll trim."""
self._trim_roll = trim_roll
def set_trim_pitch(self, trim_pitch):
"""Set a new value for the trim trim."""
self._trim_pitch = trim_pitch
    def read_input(self):
        """Read input data from the selected device.

        Scales raw axis values to angles/thrust, applies trim, deadband
        and thrust slew limiting, then notifies listeners through the
        input_updated / rp_trim_updated / emergency_stop_updated /
        althold_updated callbacks. Any read error stops the read timer
        and is reported via device_error.
        """
        try:
            data = self.inputdevice.read_input()
            # Raw axes are assumed normalized to [-1, 1] — scale to angles.
            roll = data["roll"] * self._max_rp_angle
            pitch = data["pitch"] * self._max_rp_angle
            thrust = data["thrust"]
            yaw = data["yaw"]
            raw_thrust = data["thrust"]
            emergency_stop = data["estop"]
            trim_roll = data["rollcal"]
            trim_pitch = data["pitchcal"]
            althold = data["althold"]
            # Only fire callbacks on state *transitions*, not every sample.
            if (self._old_alt_hold != althold):
                self.althold_updated.call(str(althold))
                self._old_alt_hold = althold
            if self._emergency_stop != emergency_stop:
                self._emergency_stop = emergency_stop
                self.emergency_stop_updated.call(self._emergency_stop)
            # Thrust limiting (slew, minimum and emergency stop)
            if althold and self._has_pressure_sensor:
                # In altitude hold, thrust becomes a target offset around
                # the hover point, re-centered as an unsigned 16-bit value.
                thrust = int(round(JoystickReader.deadband(thrust,0.2)*32767 + 32767)) #Convert to uint16
            else:
                if raw_thrust < 0.05 or emergency_stop:
                    thrust = 0
                else:
                    # Map [0, 1] stick range onto [min_thrust, max_thrust].
                    thrust = self._min_thrust + thrust * (self._max_thrust -
                                                          self._min_thrust)
                # Slew limiting: restrict how fast thrust may *decrease*
                # below the configured limit (prevents sudden drops).
                if (self._thrust_slew_enabled == True and
                        self._thrust_slew_limit > thrust and not
                        emergency_stop):
                    if self._old_thrust > self._thrust_slew_limit:
                        self._old_thrust = self._thrust_slew_limit
                    if thrust < (self._old_thrust - (self._thrust_slew_rate / 100)):
                        thrust = self._old_thrust - self._thrust_slew_rate / 100
                if raw_thrust < 0 or thrust < self._min_thrust:
                    thrust = 0
            self._old_thrust = thrust
            # Yaw deadband
            # TODO: Add to input device config?
            yaw = JoystickReader.deadband(yaw,0.2)*self._max_yaw_rate
            # Trim buttons accumulate into the stored trim values.
            if trim_roll != 0 or trim_pitch != 0:
                self._trim_roll += trim_roll
                self._trim_pitch += trim_pitch
                self.rp_trim_updated.call(self._trim_roll, self._trim_pitch)
            trimmed_roll = roll + self._trim_roll
            trimmed_pitch = pitch + self._trim_pitch
            self.input_updated.call(trimmed_roll, trimmed_pitch, yaw, thrust)
        except Exception:
            logger.warning("Exception while reading inputdevice: %s",
                           traceback.format_exc())
            self.device_error.call("Error reading from input device\n\n%s" %
                                   traceback.format_exc())
            # Stop polling so the same failure is not reported repeatedly.
            self._read_timer.stop()
@staticmethod
def p2t(percentage):
"""Convert a percentage to raw thrust"""
return int(MAX_THRUST * (percentage / 100.0))
@staticmethod
def deadband(value, threshold):
if abs(value) < threshold:
value = 0
elif value > 0:
value -= threshold
elif value < 0:
value += threshold
return value/(1-threshold)
| gpl-2.0 |
belemizz/mimic2_tools | clinical_db/patient_classification.py | 1 | 7408 | """Classification of the patients."""
from bunch import Bunch
import numpy as np
from mutil import p_info, Cache
from get_sample import PatientData, Mimic2m, Mimic2
import alg.clustering
import alg.classification
import alg.timeseries
mimic2m = Mimic2m()
mimic2 = Mimic2()
Default_db_param = Bunch(max_id=0, target_codes='chf', matched_only=False)
"""Param for database preparation.
:param max_id: maximum of subject id (0 for using all ids)
:param target_codes: keyword of a list of icd9 codes to select subjects
:param matched_only: select only subjects with continuous record
"""
Default_data_param = Bunch(n_lab=20, disch_origin=True, span=[-1., 0.],
coef_flag=False, tseries_flag=True, tseries_cycle=0.25)
"""Param for database preparation.
:param tseries_flag: True for use timeseries
:param tseres_cycle: Cycle of the timeseries
:param span: Span of interest
:param coef_flag: True to use coefficient of trend analysis
:param tseries_cycle: Cycle for sampling time sereis data
"""
Default_alg_param = Bunch(visualize_data=False,
class_param=alg.classification.Default_param,
tseries_param=alg.timeseries.Default_param,
n_cv_fold=10)
"""Param for algorithm
:param class_param: param for classification algorithm
:param tsereis_param: param for timeseries classification algorithm
:param n_cv_fold: number of folds in cross validation
"""
class ControlExperiment:
    """Base class for all experiment classes.

    Holds the three parameter groups (db / data / alg), flattens them into
    instance attributes, and offers compare_* helpers that sweep one
    parameter while calling the subclass-provided execution() hook.
    """
    def __init__(self,
                 max_id,
                 target_codes,
                 matched_only
                 ):
        # Snapshot the constructor args so results can be cached/reproduced.
        param = locals().copy()
        del param['self']
        self.reproduction_param = param
        self.max_id = max_id
        self.target_codes = target_codes
        self.matched_only = matched_only
        self.id_list = self.get_id_list()
    def set_db_param(self, db_param):
        # Replace the db params and refresh the subject id list accordingly.
        self.db_param = Bunch(db_param.copy())
        self.max_id = db_param.max_id
        self.target_codes = db_param.target_codes
        self.matched_only = db_param.matched_only
        self.id_list = self.get_id_list()
    def get_id_list(self):
        # Select subject ids according to target_codes.
        # NOTE(review): if target_codes is falsy (None/''/[]), none of the
        # branches assigns id_list and sorted(id_list) raises NameError —
        # callers appear to always pass a truthy value; confirm.
        if self.target_codes == 'all':
            id_list = mimic2.subject_all(self.max_id)
        elif self.target_codes == 'chf':
            id_list = mimic2.subject_with_chf(self.max_id)
        elif self.target_codes:
            id_list = mimic2.subject_with_icd9_codes(self.target_codes, True, True, self.max_id)
        if self.matched_only:
            # Keep only subjects that also have continuous (numeric) records.
            id_matched = mimic2m.get_id_numerics(self.max_id)
            id_list = list(set(id_list).intersection(set(id_matched)))
        return sorted(id_list)
    def set_data_param(self, data_param=None):
        """Set and Reset data_param
        :param data_param: new parameter (None to reset param)
        """
        if data_param is not None:
            self.data_param = Bunch(data_param.copy())
        else:
            # Reset: re-flatten the previously stored params.
            data_param = Bunch(self.data_param.copy())
        self.n_lab = data_param.n_lab
        self.disch_origin = data_param.disch_origin
        self.span = data_param.span
        self.coef_flag = data_param.coef_flag
        self.tseries_flag = data_param.tseries_flag
        self.tseries_cycle = data_param.tseries_cycle
    def set_alg_param(self, alg_param=None):
        """Set and Reset alg_param
        :param alg_param: new parameter (None to reset param)
        """
        if alg_param is not None:
            self.alg_param = Bunch(alg_param.copy())
        else:
            alg_param = Bunch(self.alg_param.copy())
        self.visualize_data = alg_param.visualize_data
        self.class_param = alg_param.class_param
        self.tseries_param = alg_param.tseries_param
        self.n_cv_fold = alg_param.n_cv_fold
    def compare_class_alg(self, l_param):
        # Run execution() once per classification param, then restore params.
        # NOTE(review): execution() is expected to be defined by subclasses.
        result = []
        for param in l_param:
            self.class_param = param
            result.append(self.execution())
        self.set_alg_param()
        return result
    def compare_span(self, l_span, include_point=False):
        # Sweep the data span; optionally include a non-timeseries baseline.
        result = []
        if include_point:
            self.tseries_flag = False
            result.append(self.execution())
            self.tseries_flag = True
        for span in l_span:
            self.span = span
            result.append(self.execution())
        self.set_data_param()
        return result
    def compare_cycle(self, l_cycle, include_point=False):
        # Sweep the timeseries sampling cycle.
        result = []
        if include_point:
            self.tseries_flag = False
            result.append(self.execution())
            self.tseries_flag = True
        for cycle in l_cycle:
            self.tseries_cycle = cycle
            result.append(self.execution())
        self.set_data_param()
        return result
    def compare_coef(self, l_span, include_point=False, include_ts=False):
        # Sweep spans with trend-coefficient features; optional baselines.
        result = []
        if include_point:
            self.coef_flag = False
            self.tseries_flag = False
            result.append(self.execution())
        if include_ts:
            self.coef_flag = False
            self.tseries_flag = True
            result.append(self.execution())
        self.coef_flag = True
        self.tseries_flag = False
        for span in l_span:
            self.span = span
            result.append(self.execution())
        self.set_data_param()
        return result
class ControlClassification(ControlExperiment):
    """Control classification of the patients.

    Groups common ICD9 codes by their co-occurrence matrix and prints the
    resulting clusters.
    """
    def __init__(self,
                 max_id=2000,
                 target_codes=None,
                 matched_only=True,
                 n_lab=None,
                 n_med=None,
                 n_icd9=None
                 ):
        param = locals().copy()
        del param['self']
        self.reproduction_param = param
        ControlExperiment.__init__(self, max_id, target_codes, matched_only)
        self.n_lab = n_lab
        self.n_med = n_med
        self.n_icd9 = n_icd9
    def classify_patient(self):
        # Pipeline: prepare -> evaluate -> visualize.
        data = self.__data_preparation()
        result = self.__eval_data(data)
        self.__visualize(result)
    def __data_preparation(self, cache_key='__data_preparation'):
        """Load (or recompute) the ICD9 co-occurrence inputs."""
        p_info('Prepare Data')
        cache = Cache(cache_key)
        try:
            # NOTE(review): this unconditional `raise IOError` looks like a
            # debug leftover that forces recomputation — the cache.load()
            # below is unreachable. Confirm whether caching should be
            # re-enabled by removing the raise.
            raise IOError
            return cache.load(self.reproduction_param)
        except IOError:
            patients = PatientData(self.id_list)
            n_patients = patients.n_patient()
            l_icd9, l_icd9_desc = patients.common_icd9(self.n_icd9)
            comat_icd9, hist_icd9, _ = patients.comat_icd9(l_icd9)
            retval = [n_patients, l_icd9, comat_icd9, hist_icd9]
            # NOTE(review): `return retval` makes the cache.save() on the
            # next line unreachable, so results are never persisted.
            # Presumably intended: `return cache.save(retval, ...)`.
            return retval
            return cache.save(retval, self.reproduction_param)
    def __eval_data(self, data):
        """Cluster the ICD9 codes from the prepared co-occurrence data."""
        p_info('Eval Data')
        [n_patients, l_icd9, comat_icd9, hist_icd9] = data
        l_group = alg.clustering.group_comat(comat_icd9, hist_icd9, n_patients, 3.0, 0.1)
        return [l_group, l_icd9]
    def __visualize(self, result):
        """Print each cluster and the ICD9 codes it contains (Python 2)."""
        p_info('Visualize Result')
        [l_group, l_icd9] = result
        for g in set(l_group):
            print "Group %d" % g
            for idx in np.where(l_group == g)[0]:
                print l_icd9[idx]
            print '_____'
if __name__ == '__main__':
    # Script entry point: run the ICD9 co-occurrence clustering demo.
    cc = ControlClassification()
    cc.classify_patient()
| mit |
jimberlage/servo | tests/wpt/web-platform-tests/cors/resources/cors-makeheader.py | 122 | 2224 | import json
def main(request, response):
    """WPT handler (Python 2) that builds CORS responses on demand.

    Query parameters drive which Access-Control-* headers are emitted,
    whether a preflight is logged to the server stash, and the response
    status code. The default response body is a JSON dump of the request
    headers so tests can inspect what the browser actually sent.
    """
    origin = request.GET.first("origin", request.headers.get('origin'))
    if "check" in request.GET:
        # Stash query mode: report (and optionally keep) whether a
        # preflight with this token was previously recorded.
        token = request.GET.first("token")
        value = request.server.stash.take(token)
        if value is not None:
            if request.GET.first("check", None) == "keep":
                request.server.stash.put(token, value)
            body = "1"
        else:
            body = "0"
        return [("Content-Type", "text/plain")], body
    if origin != 'none':
        response.headers.set("Access-Control-Allow-Origin", origin)
    if 'origin2' in request.GET:
        response.headers.append("Access-Control-Allow-Origin", request.GET.first('origin2'))
    #Preflight
    if 'headers' in request.GET:
        response.headers.set("Access-Control-Allow-Headers", request.GET.first('headers'))
    if 'credentials' in request.GET:
        response.headers.set("Access-Control-Allow-Credentials", request.GET.first('credentials'))
    if 'methods' in request.GET:
        response.headers.set("Access-Control-Allow-Methods", request.GET.first('methods'))
    code_raw = request.GET.first('code', None)
    if code_raw:
        code = int(code_raw)
    else:
        code = None
    if request.method == 'OPTIONS':
        #Override the response code if we're in a preflight and it's asked
        if 'preflight' in request.GET:
            code = int(request.GET.first('preflight'))
        #Log that the preflight actually happened if we have an ident
        if 'token' in request.GET:
            request.server.stash.put(request.GET['token'], True)
    if 'location' in request.GET:
        # Redirect mode: default to 302 unless an explicit 3xx was given.
        if code is None:
            code = 302
        if code >= 300 and code < 400:
            response.headers.set("Location", request.GET.first('location'))
    # Echo the request headers back as JSON (Py2: headers.iteritems()).
    headers = {}
    for name, values in request.headers.iteritems():
        if len(values) == 1:
            headers[name] = values[0]
        else:
            #I have no idea, really
            headers[name] = values
    headers['get_value'] = request.GET.first('get_value', '')
    body = json.dumps(headers)
    if code:
        return (code, "StatusText"), [], body
    else:
        return body
| mpl-2.0 |
Akylas/zxing | cpp/scons/scons-local-2.0.0.final.0/SCons/dblite.py | 95 | 7472 | # dblite.py module contributed by Ralf W. Grosse-Kunstleve.
# Extended for Unicode by Steven Knight.
import SCons.compat
import builtins
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import shutil
import time
# Py2 octal literal; value is 0. When true, sync() keeps a timestamped
# copy of every database file it writes.
keep_all_files = 00000
# 0 = raise on a corrupt db file, 1 = warn and start fresh, 2+ = silent.
ignore_corrupt_dbfiles = 0
def corruption_warning(filename):
    # Python 2 print statement: announce that a corrupt db is being dropped.
    print "Warning: Discarding corrupt database:", filename
# Py2/Py3 compatibility shims: on Py3 `unicode` does not exist, so
# is_string() only accepts str; on Py2 it accepts both str and unicode.
try: unicode
except NameError:
    def is_string(s):
        return isinstance(s, str)
else:
    def is_string(s):
        return type(s) in (str, unicode)
try:
    unicode('a')
except NameError:
    # Py3: make unicode() a no-op so the rest of the module can call it.
    def unicode(s): return s
# File name suffixes for the database and its atomic-write temp file.
dblite_suffix = '.dblite'
tmp_suffix = '.tmp'
class dblite(object):
    """A minimal pickle-backed dbm-style mapping (SCons .dblite files).

    Supports the anydbm-like flags "r"/"w"/"c"/"n"; the whole dict is
    pickled to disk atomically (write temp file, then rename) on sync().
    Python 2 syntax (`except E, e`, octal literals) — do not port blindly.
    """
    # Squirrel away references to the functions in various modules
    # that we'll use when our __del__() method calls our sync() method
    # during shutdown.  We might get destroyed when Python is in the midst
    # of tearing down the different modules we import in an essentially
    # arbitrary order, and some of the various modules's global attributes
    # may already be wiped out from under us.
    #
    # See the discussion at:
    #   http://mail.python.org/pipermail/python-bugs-list/2003-March/016877.html
    _open = builtins.open
    _pickle_dump = staticmethod(pickle.dump)
    _os_chmod = os.chmod
    try:
        _os_chown = os.chown
    except AttributeError:
        # os.chown does not exist on Windows.
        _os_chown = None
    _os_rename = os.rename
    _os_unlink = os.unlink
    _shutil_copyfile = shutil.copyfile
    _time_time = time.time
    def __init__(self, file_base_name, flag, mode):
        # flag: None/"r" read-only, "w" writable, "c" create-if-missing,
        # "n" always start with a fresh empty database.
        assert flag in (None, "r", "w", "c", "n")
        if (flag is None): flag = "r"
        base, ext = os.path.splitext(file_base_name)
        if ext == dblite_suffix:
            # There's already a suffix on the file name, don't add one.
            self._file_name = file_base_name
            self._tmp_name = base + tmp_suffix
        else:
            self._file_name = file_base_name + dblite_suffix
            self._tmp_name = file_base_name + tmp_suffix
        self._flag = flag
        self._mode = mode
        self._dict = {}
        self._needs_sync = 00000
        if self._os_chown is not None and (os.geteuid()==0 or os.getuid()==0):
            # running as root; chown back to current owner/group when done
            try:
                statinfo = os.stat(self._file_name)
                self._chown_to = statinfo.st_uid
                self._chgrp_to = statinfo.st_gid
            except OSError, e:
                # db file doesn't exist yet.
                # Check os.environ for SUDO_UID, use if set
                self._chown_to = int(os.environ.get('SUDO_UID', -1))
                self._chgrp_to = int(os.environ.get('SUDO_GID', -1))
        else:
            self._chown_to = -1 # don't chown
            self._chgrp_to = -1 # don't chgrp
        if (self._flag == "n"):
            # "n": truncate/create a fresh file and start empty.
            self._open(self._file_name, "wb", self._mode)
        else:
            try:
                f = self._open(self._file_name, "rb")
            except IOError, e:
                if (self._flag != "c"):
                    raise e
                self._open(self._file_name, "wb", self._mode)
            else:
                p = f.read()
                if (len(p) > 0):
                    try:
                        self._dict = pickle.loads(p)
                    except (pickle.UnpicklingError, EOFError):
                        # Corrupt db: raise, warn, or ignore depending on
                        # the module-level ignore_corrupt_dbfiles setting.
                        if (ignore_corrupt_dbfiles == 0): raise
                        if (ignore_corrupt_dbfiles == 1):
                            corruption_warning(self._file_name)
    def __del__(self):
        # Flush unsaved changes at interpreter shutdown (see class note).
        if (self._needs_sync):
            self.sync()
    def sync(self):
        """Atomically write the in-memory dict to disk (temp + rename)."""
        self._check_writable()
        f = self._open(self._tmp_name, "wb", self._mode)
        self._pickle_dump(self._dict, f, 1)
        f.close()
        # Windows doesn't allow renaming if the file exists, so unlink
        # it first, chmod'ing it to make sure we can do so.  On UNIX, we
        # may not be able to chmod the file if it's owned by someone else
        # (e.g. from a previous run as root).  We should still be able to
        # unlink() the file if the directory's writable, though, so ignore
        # any OSError exception  thrown by the chmod() call.
        try: self._os_chmod(self._file_name, 0777)
        except OSError: pass
        self._os_unlink(self._file_name)
        self._os_rename(self._tmp_name, self._file_name)
        if self._os_chown is not None and self._chown_to > 0:  # don't chown to root or -1
            try:
                self._os_chown(self._file_name, self._chown_to, self._chgrp_to)
            except OSError:
                pass
        self._needs_sync = 00000
        if (keep_all_files):
            # Debug aid: keep a timestamped snapshot of every sync.
            self._shutil_copyfile(
                self._file_name,
                self._file_name + "_" + str(int(self._time_time())))
    def _check_writable(self):
        if (self._flag == "r"):
            raise IOError("Read-only database: %s" % self._file_name)
    def __getitem__(self, key):
        return self._dict[key]
    def __setitem__(self, key, value):
        # Both keys and values must be strings; mark the db dirty so
        # __del__/sync() will persist the change.
        self._check_writable()
        if (not is_string(key)):
            raise TypeError("key `%s' must be a string but is %s" % (key, type(key)))
        if (not is_string(value)):
            raise TypeError("value `%s' must be a string but is %s" % (value, type(value)))
        self._dict[key] = value
        self._needs_sync = 0001
    def keys(self):
        return list(self._dict.keys())
    def has_key(self, key):
        return key in self._dict
    def __contains__(self, key):
        return key in self._dict
    def iterkeys(self):
        # Wrapping name in () prevents fixer from "fixing" this
        return (self._dict.iterkeys)()
    __iter__ = iterkeys
    def __len__(self):
        return len(self._dict)
# Module-level factory mirroring anydbm.open(); 0666 is a Py2 octal literal.
def open(file, flag=None, mode=0666):
    return dblite(file, flag, mode)
def _exercise():
    """Self-test for the dblite module (Python 2 only).

    Exercises create/read/write flags, unicode keys, the read-only
    guard, type checking of keys/values, and corrupt-file handling.
    """
    db = open("tmp", "n")
    assert len(db) == 0
    db["foo"] = "bar"
    assert db["foo"] == "bar"
    db[unicode("ufoo")] = unicode("ubar")
    assert db[unicode("ufoo")] == unicode("ubar")
    db.sync()
    db = open("tmp", "c")
    assert len(db) == 2, len(db)
    assert db["foo"] == "bar"
    db["bar"] = "foo"
    assert db["bar"] == "foo"
    db[unicode("ubar")] = unicode("ufoo")
    assert db[unicode("ubar")] == unicode("ufoo")
    db.sync()
    db = open("tmp", "r")
    assert len(db) == 4, len(db)
    assert db["foo"] == "bar"
    assert db["bar"] == "foo"
    assert db[unicode("ufoo")] == unicode("ubar")
    assert db[unicode("ubar")] == unicode("ufoo")
    try:
        # Writing through a read-only handle must fail.
        db.sync()
    except IOError, e:
        assert str(e) == "Read-only database: tmp.dblite"
    else:
        raise RuntimeError("IOError expected.")
    db = open("tmp", "w")
    assert len(db) == 4
    db["ping"] = "pong"
    db.sync()
    try:
        # Non-string keys are rejected.
        db[(1,2)] = "tuple"
    except TypeError, e:
        assert str(e) == "key `(1, 2)' must be a string but is <type 'tuple'>", str(e)
    else:
        raise RuntimeError("TypeError exception expected")
    try:
        # Non-string values are rejected.
        db["list"] = [1,2]
    except TypeError, e:
        assert str(e) == "value `[1, 2]' must be a string but is <type 'list'>", str(e)
    else:
        raise RuntimeError("TypeError exception expected")
    db = open("tmp", "r")
    assert len(db) == 5
    db = open("tmp", "n")
    assert len(db) == 0
    # Empty file: loads nothing, no error.
    dblite._open("tmp.dblite", "w")
    db = open("tmp", "r")
    # Corrupt (unpicklable) file: must raise by default...
    dblite._open("tmp.dblite", "w").write("x")
    try:
        db = open("tmp", "r")
    except pickle.UnpicklingError:
        pass
    else:
        raise RuntimeError("pickle exception expected.")
    # ...and be silently ignored when ignore_corrupt_dbfiles >= 2.
    global ignore_corrupt_dbfiles
    ignore_corrupt_dbfiles = 2
    db = open("tmp", "r")
    assert len(db) == 0
    os.unlink("tmp.dblite")
    try:
        # "w" requires the file to already exist.
        db = open("tmp", "w")
    except IOError, e:
        assert str(e) == "[Errno 2] No such file or directory: 'tmp.dblite'", str(e)
    else:
        raise RuntimeError("IOError expected.")
    print "OK"
if (__name__ == "__main__"):
    # Run the module self-test when executed directly.
    _exercise()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rahushen/ansible | lib/ansible/modules/network/nxos/nxos_system.py | 31 | 11749 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_system
extends_documentation_fragment: nxos
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco NXOS devices
description:
- This module provides declarative management of node system attributes
on Cisco NXOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configures the default domain
name suffix to be used when referencing this node by its
FQDN. This argument accepts either a list of domain names or
a list of dicts that configure the domain name and VRF name. See
examples.
domain_lookup:
description:
- Enables or disables the DNS
lookup feature in Cisco NXOS. This argument accepts boolean
values. When enabled, the system will try to resolve hostnames
using DNS and when disabled, hostnames will not be resolved.
domain_search:
description:
- Configures a list of domain
name suffixes to search when performing DNS name resolution.
This argument accepts either a list of domain names or
a list of dicts that configure the domain name and VRF name. See
examples.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers or
a list of hashes that configure the name server and VRF name. See
examples.
system_mtu:
description:
- Specifies the mtu, must be an integer.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
nxos_system:
hostname: nxos01
domain_name: test.example.com
- name: remove configuration
nxos_system:
state: absent
- name: configure name servers
nxos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
- name: configure name servers with VRF support
nxos_system:
name_servers:
- { server: 8.8.8.8, vrf: mgmt }
- { server: 8.8.4.4, vrf: mgmt }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname nxos01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import ComplexList
# Per-run cache of the VRF names found in the device configuration.
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
    """Return True if *vrf* is configured on the device.

    The device configuration is fetched and scanned only once; the
    discovered VRF names are cached in _CONFIGURED_VRFS for later calls.
    """
    global _CONFIGURED_VRFS
    if _CONFIGURED_VRFS is None:
        _CONFIGURED_VRFS = re.findall(r'vrf context (\S+)', get_config(module))
    return vrf in _CONFIGURED_VRFS
def map_obj_to_commands(want, have, module):
    """Compute the NXOS config commands that turn *have* into *want*.

    *want* and *have* are the dicts produced by map_params_to_obj() and
    map_config_to_obj(). Returns the ordered list of CLI commands;
    VRF-scoped entries are wrapped in 'vrf context <name>' ... 'exit'.
    """
    commands = list()
    state = module.params['state']
    def needs_update(x):
        # True when the user supplied a value that differs from the device.
        return want.get(x) and (want.get(x) != have.get(x))
    def difference(x, y, z):
        # Items present in x[z] but not in y[z].
        return [item for item in x[z] if item not in y[z]]
    def remove(cmd, commands, vrf=None):
        if vrf:
            commands.append('vrf context %s' % vrf)
        commands.append(cmd)
        if vrf:
            commands.append('exit')
    def add(cmd, commands, vrf=None):
        # Same as remove(), but validates the VRF exists on the device first.
        if vrf:
            if not has_vrf(module, vrf):
                module.fail_json(msg='invalid vrf name %s' % vrf)
        return remove(cmd, commands, vrf)
    if state == 'absent':
        # Strip every managed attribute currently present on the device.
        if have['hostname']:
            commands.append('no hostname')
        for item in have['domain_name']:
            cmd = 'no ip domain-name %s' % item['name']
            remove(cmd, commands, item['vrf'])
        for item in have['domain_search']:
            cmd = 'no ip domain-list %s' % item['name']
            remove(cmd, commands, item['vrf'])
        for item in have['name_servers']:
            cmd = 'no ip name-server %s' % item['server']
            remove(cmd, commands, item['vrf'])
        if have['system_mtu']:
            commands.append('no system jumbomtu')
    if state == 'present':
        if needs_update('hostname'):
            commands.append('hostname %s' % want['hostname'])
        if needs_update('domain_lookup'):
            cmd = 'ip domain-lookup'
            if want['domain_lookup'] is False:
                cmd = 'no %s' % cmd
            commands.append(cmd)
        # For the list-valued options: first remove stale entries, then
        # add the missing ones (declarative reconciliation).
        if want['domain_name']:
            for item in difference(have, want, 'domain_name'):
                cmd = 'no ip domain-name %s' % item['name']
                remove(cmd, commands, item['vrf'])
            for item in difference(want, have, 'domain_name'):
                cmd = 'ip domain-name %s' % item['name']
                add(cmd, commands, item['vrf'])
        if want['domain_search']:
            for item in difference(have, want, 'domain_search'):
                cmd = 'no ip domain-list %s' % item['name']
                remove(cmd, commands, item['vrf'])
            for item in difference(want, have, 'domain_search'):
                cmd = 'ip domain-list %s' % item['name']
                add(cmd, commands, item['vrf'])
        if want['name_servers']:
            for item in difference(have, want, 'name_servers'):
                cmd = 'no ip name-server %s' % item['server']
                remove(cmd, commands, item['vrf'])
            for item in difference(want, have, 'name_servers'):
                cmd = 'ip name-server %s' % item['server']
                add(cmd, commands, item['vrf'])
        if needs_update('system_mtu'):
            commands.append('system jumbomtu %s' % want['system_mtu'])
    return commands
def parse_hostname(config):
    """Return the hostname configured in *config*, or None if absent."""
    found = re.search(r'^hostname (\S+)', config, re.M)
    return found.group(1) if found else None
def parse_domain_name(config, vrf_config):
    """Extract 'ip domain-name' entries from the global and per-VRF config.

    :param config: full running-config text
    :param vrf_config: mapping of vrf name -> that vrf's config block
    :return: list of {'name': <domain>, 'vrf': <vrf or None>} dicts
    """
    objects = list()
    regex = re.compile(r'ip domain-name (\S+)')
    # Bug fix: Pattern.search() takes (string, pos, endpos) — passing re.M
    # (== 8) as the second argument started the search at offset 8, so a
    # domain-name at the very start of the config was silently missed.
    # The pattern has no ^ anchor, so no flag is needed here.
    match = regex.search(config)
    if match:
        objects.append({'name': match.group(1), 'vrf': None})
    # dict.items() is used instead of six.iteritems() — equivalent on both
    # Python 2 and 3, and keeps this function free of helper dependencies.
    for vrf, cfg in vrf_config.items():
        match = regex.search(cfg)
        if match:
            objects.append({'name': match.group(1), 'vrf': vrf})
    return objects
def parse_domain_search(config, vrf_config):
    """Extract 'ip domain-list' entries from global and per-VRF config."""
    entries = [{'name': name, 'vrf': None}
               for name in re.findall(r'^ip domain-list (\S+)', config, re.M)]
    for vrf, cfg in vrf_config.items():
        entries.extend({'name': name, 'vrf': vrf}
                       for name in re.findall(r'ip domain-list (\S+)', cfg, re.M))
    return entries
def parse_name_servers(config, vrf_config, vrfs):
    """Extract 'ip name-server' addresses from global and per-VRF config.

    In the global line, the literal token 'use-vrf' and any token naming a
    known VRF are skipped — they are qualifiers, not server addresses.
    """
    servers = list()
    global_match = re.search('^ip name-server (.+)$', config, re.M)
    if global_match:
        for addr in global_match.group(1).split(' '):
            if addr == 'use-vrf' or addr in vrfs:
                continue
            servers.append({'server': addr, 'vrf': None})
    for vrf, cfg in vrf_config.items():
        scoped = re.search('ip name-server (.+)', cfg, re.M)
        if scoped:
            servers.extend({'server': addr, 'vrf': vrf}
                           for addr in scoped.group(1).split(' '))
    return servers
def parse_system_mtu(config):
    """Return the configured jumbo MTU as an int, or None if unset."""
    found = re.search(r'^system jumbomtu (\d+)', config, re.M)
    return int(found.group(1)) if found else None
def map_config_to_obj(module):
    """Build the 'have' state dict from the device's running configuration."""
    config = get_config(module)
    configobj = NetworkConfig(indent=2, contents=config)
    vrfs = re.findall(r'^vrf context (\S+)$', config, re.M)
    # Collect each VRF's own config block so the parse_* helpers can look
    # for VRF-scoped entries.
    vrf_config = {
        vrf: configobj.get_block_config(path=['vrf context %s' % vrf])
        for vrf in vrfs
    }
    return {
        'hostname': parse_hostname(config),
        'domain_lookup': 'no ip domain-lookup' not in config,
        'domain_name': parse_domain_name(config, vrf_config),
        'domain_search': parse_domain_search(config, vrf_config),
        'name_servers': parse_name_servers(config, vrf_config, vrfs),
        'system_mtu': parse_system_mtu(config)
    }
def validate_system_mtu(value, module):
    """Fail the module when *value* lies outside the legal 1500-9216 range."""
    if value < 1500 or value > 9216:
        module.fail_json(msg='system_mtu must be between 1500 and 9216')
def map_params_to_obj(module):
    """Build the 'want' state dict from the module parameters.

    The three list-valued options accept either bare strings or dicts with
    a VRF; ComplexList normalizes both forms into [{key..., 'vrf': ...}].
    """
    obj = {
        'hostname': module.params['hostname'],
        'domain_lookup': module.params['domain_lookup'],
        'system_mtu': module.params['system_mtu']
    }
    domain_name = ComplexList(dict(
        name=dict(key=True),
        vrf=dict()
    ), module)
    domain_search = ComplexList(dict(
        name=dict(key=True),
        vrf=dict()
    ), module)
    name_servers = ComplexList(dict(
        server=dict(key=True),
        vrf=dict()
    ), module)
    for arg, cast in [('domain_name', domain_name), ('domain_search', domain_search),
                      ('name_servers', name_servers)]:
        # Unset options stay None so map_obj_to_commands can skip them.
        if module.params[arg] is not None:
            obj[arg] = cast(module.params[arg])
        else:
            obj[arg] = None
    return obj
def main():
    """ main entry point for module execution

    Standard Ansible flow: declare the argument spec, compute the diff
    between desired ('want') and device ('have') state, push the resulting
    commands unless running in check mode, and report changed status.
    """
    argument_spec = dict(
        hostname=dict(),
        domain_lookup=dict(type='bool'),
        # { name: <str>, vrf: <str> }
        domain_name=dict(type='list'),
        # {name: <str>, vrf: <str> }
        domain_search=dict(type='list'),
        # { server: <str>; vrf: <str> }
        name_servers=dict(type='list'),
        system_mtu=dict(type='int'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(want, have, module)
    result['commands'] = commands
    if commands:
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    # Ansible executes the module file directly.
    main()
| gpl-3.0 |
Ideabin/Gist | yaml/serializer.py | 293 | 4165 |
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
    """Raised when the Serializer is used in an invalid state
    (not opened, already closed, or opened twice)."""
class Serializer:
    """Mixin that turns node trees into emitter events (PyYAML).

    Expects the composing class to provide emit(), descend_resolver(),
    ascend_resolver() and resolve() (from the Emitter/Resolver mixins).
    Tracks anchors so nodes referenced more than once are serialized once
    and aliased afterwards.
    """
    # printf-style template for generated anchor names (id001, id002, ...).
    ANCHOR_TEMPLATE = 'id%03d'
    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # serialized_nodes/anchors are per-document and reset by serialize().
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # closed: None = never opened, False = open, True = closed.
        self.closed = None
    def open(self):
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")
    def close(self):
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True
    #def __del__(self):
    #    self.close()
    def serialize(self, node):
        """Emit one document for *node* (two passes: anchor, then emit)."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
    def anchor_node(self, node):
        # First visit: mark node with anchor None. Second visit (shared
        # node): assign a real anchor name so it can be aliased.
        if node in self.anchors:
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)
    def generate_anchor(self, node):
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id
    def serialize_node(self, node, parent, index):
        """Emit the events for *node*, using an alias if already emitted."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit = (tag matches plain-style resolution,
                #             tag matches quoted-style resolution).
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
| mit |
willzhang05/postgrestesting1 | postgrestesting1/lib/python3.5/site-packages/setuptools/site-patch.py | 720 | 2389 | def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| mit |
agoragames/python-leaderboard | leaderboard/__init__.py | 1 | 8240 | from redis import Redis, ConnectionPool
from copy import deepcopy
from math import ceil
class Leaderboard(object):
    '''Redis-backed leaderboard stored in a single sorted set.

    Member scores live in the sorted set named `leaderboard_name`; the
    `order` option controls whether rank 1 is the highest (DESC, default)
    or the lowest (ASC) score.
    '''

    VERSION = '1.1.8'
    DEFAULT_PAGE_SIZE = 25
    DEFAULT_REDIS_HOST = 'localhost'
    DEFAULT_REDIS_PORT = 6379
    DEFAULT_REDIS_DB = 0
    ASC = 'asc'
    DESC = 'desc'

    @classmethod
    def pool(self, host, port, db, pools={}):
        '''
        Fetch a redis connection pool for the unique combination of host,
        port and db. Will create a new one if there isn't one already.

        Note: the mutable default `pools` is deliberate -- it is the
        process-wide cache of pools shared across calls.
        '''
        key = (host, port, db)
        rval = pools.get(key)
        if not isinstance(rval, ConnectionPool):
            rval = ConnectionPool(host=host, port=port, db=db)
            pools[key] = rval
        return rval

    def __init__(self, leaderboard_name, **options):
        '''
        Initialize a connection to a specific leaderboard. By default, will use a
        redis connection pool for any unique host:port:db pairing.

        The options and their default values (if any) are:
            host : the host to connect to if creating a new handle ('localhost')
            port : the port to connect to if creating a new handle (6379)
            db : the redis database to connect to if creating a new handle (0)
            page_size : the default number of items to return in each page (25)
            connection : an existing redis handle if re-using for this leaderboard
            connection_pool : redis connection pool to use if creating a new handle

        Raises:
            ValueError: if `order` is neither 'asc' nor 'desc'.
        '''
        self.leaderboard_name = leaderboard_name
        self.redis_connection = options.pop('connection', None)
        self.options = deepcopy(options)
        self.page_size = self.options.pop('page_size', self.DEFAULT_PAGE_SIZE)
        if self.page_size < 1:
            self.page_size = self.DEFAULT_PAGE_SIZE
        self.order = self.options.pop('order', self.DESC).lower()
        if not self.order in [self.ASC, self.DESC]:
            raise ValueError("%s is not one of [%s]" % (self.order, ",".join([self.ASC, self.DESC])))
        if not isinstance(self.redis_connection, Redis):
            if 'connection_pool' not in self.options:
                self.options['connection_pool'] = self.pool(
                    self.options.pop('host', self.DEFAULT_REDIS_HOST),
                    self.options.pop('port', self.DEFAULT_REDIS_PORT),
                    self.options.pop('db', self.DEFAULT_REDIS_DB)
                )
            self.redis_connection = Redis(**self.options)

    @classmethod
    def get_connection(self, **options):
        '''Build a standalone Redis handle backed by the shared pool cache.'''
        redis_options = {}
        redis_options['connection_pool'] = self.pool(
            options.pop('host', self.DEFAULT_REDIS_HOST),
            options.pop('port', self.DEFAULT_REDIS_PORT),
            options.pop('db', self.DEFAULT_REDIS_DB)
        )
        return Redis(**redis_options)

    def _get(self, **options):
        '''Return the pipeline from `options` if one was given, else the
        plain redis connection.'''
        if 'pipeline' in options and options['pipeline'] != None:
            return options['pipeline']
        return self.redis_connection

    def pipeline(self):
        '''Start a redis pipeline for batching leaderboard operations.'''
        return self.redis_connection.pipeline()

    def commit(self, pipeline):
        '''Execute a pipeline previously created with pipeline().'''
        pipeline.execute()

    def rank_member(self, member, score, **options):
        '''Add `member` with `score` (or update its score).'''
        # redis-py deprecated the non-kwarg form of zadd
        return self._get(**options).zadd(self.leaderboard_name, **{str(member): score})

    def remove_member(self, member, **options):
        '''Remove `member` from the leaderboard.'''
        return self._get(**options).zrem(self.leaderboard_name, str(member))

    def clear(self, **options):
        '''Remove all rankings for this leaderboard.'''
        self._get(**options).delete(self.leaderboard_name)

    def total_members(self, **options):
        '''Total number of ranked members.'''
        return self._get(**options).zcard(self.leaderboard_name)

    def total_pages(self, **options):
        '''Number of pages needed to show every member.'''
        return ceil(float(self.total_members(**options)) / options.get('page_size', self.page_size))

    def total_members_in_score_range(self, min_score, max_score, **options):
        '''Count members whose score is within [min_score, max_score].'''
        return self._get(**options).zcount(self.leaderboard_name, min_score, max_score)

    def change_score_for(self, member, delta, **options):
        '''Increment `member`'s score by `delta` (may be negative).'''
        return self._get(**options).zincrby(self.leaderboard_name, str(member), delta)

    def rank_for(self, member, use_zero_index_for_rank = False, **options):
        '''Rank of `member`, or None when the member is not ranked.

        Fix: the previous bare `except` turned *any* failure (including real
        redis connection errors) into None; only a missing member should
        yield None, so the None rank is now handled explicitly and genuine
        errors propagate.
        '''
        rank = self._rank_method(self._get(**options), self.leaderboard_name, str(member))
        if rank is None:
            return None
        return rank + (0 if use_zero_index_for_rank else 1)

    def score_for(self, member, **options):
        '''Score of `member`, or None when the member is not ranked.'''
        return self._get(**options).zscore(self.leaderboard_name, str(member))

    def check_member(self, member, **options):
        '''True when `member` has a score in this leaderboard.'''
        # `... is not None` replaces the old `not None == ...` anti-idiom.
        return self._get(**options).zscore(self.leaderboard_name, str(member)) is not None

    def score_and_rank_for(self, member, use_zero_index_for_rank = False, **options):
        '''Return a dict with the member, its score and its rank.'''
        return {
            'member' : member,
            'score' : self.score_for(member, **options),
            'rank' : self.rank_for(member, use_zero_index_for_rank, **options)
        }

    def remove_members_in_score_range(self, min_score, max_score, **options):
        '''Delete every member whose score lies in [min_score, max_score].'''
        return self._get(**options).zremrangebyscore(self.leaderboard_name, min_score, max_score)

    def leaders(self, current_page, with_scores = True, with_rank = True, use_zero_index_for_rank = False, **options):
        '''Return one page of leaders (list of dicts), or None if empty.'''
        if current_page < 1:
            current_page = 1
        page_size = options.get('page_size', self.page_size)
        # NOTE: an unused total_pages() call (an extra redis round trip per
        # request) was removed here.
        index_for_redis = current_page - 1
        starting_offset = (index_for_redis * page_size)
        if starting_offset < 0:
            starting_offset = 0
        ending_offset = (starting_offset + page_size) - 1
        raw_leader_data = self._range_method(self._get(**options), self.leaderboard_name, int(starting_offset), int(ending_offset), withscores=with_scores)
        if raw_leader_data:
            return self._massage_leader_data(raw_leader_data, with_rank, use_zero_index_for_rank)
        return None

    def around_me(self, member, with_scores = True, with_rank = True, use_zero_index_for_rank = False, **options):
        '''Return the page of members centered on `member`, or None when the
        member is not ranked.'''
        reverse_rank_for_member = \
            self._rank_method(self._get(**options), self.leaderboard_name, str(member))
        if reverse_rank_for_member is None:
            return None
        page_size = options.get('page_size', self.page_size)
        # Floor division keeps the offsets integral under Python 3 (the old
        # `/ 2` produced a float offset); behavior under Python 2 is
        # unchanged since int / int already floored.
        starting_offset = reverse_rank_for_member - (page_size // 2)
        if starting_offset < 0:
            starting_offset = 0
        ending_offset = (starting_offset + page_size) - 1
        raw_leader_data = self._range_method(self._get(**options), self.leaderboard_name, starting_offset, ending_offset, withscores=with_scores)
        if raw_leader_data:
            return self._massage_leader_data(raw_leader_data, with_rank, use_zero_index_for_rank)
        return None

    def ranked_in_list(self, members, with_scores = True, use_zero_index_for_rank = False):
        '''Score/rank data for each of `members`, preserving input order.'''
        ranks_for_members = []
        for member in members:
            data = {}
            data['member'] = member
            data['rank'] = self.rank_for(member, use_zero_index_for_rank)
            if with_scores:
                data['score'] = self.score_for(member)
            ranks_for_members.append(data)
        return ranks_for_members

    def _range_method(self, connection, *args, **kwargs):
        # DESC leaderboards list highest scores first.
        if self.order == self.DESC:
            return connection.zrevrange(*args, **kwargs)
        else:
            return connection.zrange(*args, **kwargs)

    def _rank_method(self, connection, *args, **kwargs):
        # DESC leaderboards rank highest scores first.
        if self.order == self.DESC:
            return connection.zrevrank(*args, **kwargs)
        else:
            return connection.zrank(*args, **kwargs)

    def _massage_leader_data(self, leaders, with_rank, use_zero_index_for_rank):
        '''Convert raw (member, score) pairs into dicts, optionally adding
        each member's rank.'''
        # (Removed an unused `member_attribute` local that was never read.)
        leader_data = []
        for leader_data_item in leaders:
            data = {}
            data['member'] = leader_data_item[0]
            data['score'] = leader_data_item[1]
            if with_rank:
                data['rank'] = self.rank_for(data['member'], use_zero_index_for_rank)
            leader_data.append(data)
        return leader_data
| mit |
jmwerner/recipes | tests/conftest.py | 1 | 7927 | '''Base configurations for tests'''
import os
import pytest
import bs4 as bs
import subprocess
import sys
sys.path.insert(0, 'generator')
import webpageGenerator as gen
@pytest.fixture(scope="session")
def sitemap_name():
    """File name of the sitemap under test."""
    name = 'sitemap.xml'
    return name
@pytest.fixture(scope="session")
def root_directory():
    """Absolute path of the git repository root, as reported by git."""
    git_cmd = ['git', 'rev-parse', '--show-toplevel']
    raw_output = subprocess.Popen(git_cmd, stdout=subprocess.PIPE).communicate()[0]
    return raw_output.rstrip().decode('utf-8')
@pytest.fixture(scope="session")
def xml_tag():
    """XML tag wrapping each URL in the sitemap."""
    tag = 'loc'
    return tag
@pytest.fixture(scope="session")
def raw_sitemap(root_directory, sitemap_name):
    """Contents of the sitemap file with newlines removed.

    Fix: the old code caught ValueError (which open() never raises for a
    missing file) and then fell through to `return sitemap`, producing a
    confusing NameError. A missing/unreadable sitemap now fails the
    dependent tests with a clear message.
    """
    sitemap_path = root_directory + '/' + sitemap_name
    try:
        with open(sitemap_path) as sitemap_file:
            return sitemap_file.read().replace('\n', '')
    except OSError:
        pytest.fail('Error: Specified sitemap does not exist: ' + sitemap_path)
@pytest.fixture(scope="session")
def processed_links_from_sitemap(raw_sitemap, xml_tag):
    """List of stripped URL strings extracted from the sitemap XML."""
    soup = bs.BeautifulSoup(raw_sitemap, 'html.parser')
    return [strip_link(tag, xml_tag) for tag in soup.findAll(xml_tag)]
@pytest.fixture(scope="session")
def recipe_raw_folder_name():
    """Folder holding the raw recipe category directories."""
    folder = 'allRecipes'
    return folder
@pytest.fixture(scope="session")
def recipe_category_names(recipe_raw_folder_name):
    """Recipe category folder names, ignoring hidden entries."""
    return [entry for entry in os.listdir(recipe_raw_folder_name)
            if not entry.startswith('.')]
def strip_link(link_input, tag):
    """Return the text content of an XML element, minus its <tag> wrappers
    and surrounding whitespace."""
    open_tag = '<' + tag + '>'
    close_tag = '</' + tag + '>'
    text = str(link_input).replace(open_tag, '').replace(close_tag, '')
    return text.strip()
class Helpers:
    '''Static/instance utility helpers shared across the recipe tests.'''

    @staticmethod
    def get_html_from_local_file(file):
        '''Return the full text of a local HTML file.'''
        # Replace base website with local path for fast reading
        with open(file, "r") as f:
            page = f.read()
        return page

    @staticmethod
    def get_local_file_from_url(url, root_directory):
        '''Translate a published recipe URL into its local file path.'''
        return url.replace('http://jmwerner.github.io/recipes', root_directory)

    @staticmethod
    def create_category_iterator(ingredients_dict):
        '''Build a {category: 0} counter dict for the given ingredients.

        When no ingredient declares a category, a single 'noCategory'
        bucket is returned instead.
        '''
        unique_categories = ['']
        for i in range(0, len(ingredients_dict)):
            if 'category' in ingredients_dict[i]:
                if ingredients_dict[i]['category'][0] not in unique_categories:
                    unique_categories.append(ingredients_dict[i]['category'][0])
        if(len(unique_categories) == 1 and unique_categories[0] == ''):
            unique_categories = ['noCategory']
        output_dict = {}
        for key in unique_categories:
            output_dict[key] = 0
        return output_dict

    @staticmethod
    def find_ingredient_category(input):
        '''Category of one ingredient dict, or 'noCategory' when the key is
        absent or its first entry is falsy.'''
        if 'category' not in input:
            return 'noCategory'
        else:
            if not input['category'][0]:
                return 'noCategory'
            else:
                return input['category'][0]

    @staticmethod
    def process_json_name(input_string):
        '''Normalize a recipe name the same way the site generator does.'''
        output_string = input_string.strip(' \n').lower().title()
        output_string = gen.lower_special_cases_in_string(output_string)
        output_string = gen.replace_degrees_in_string(output_string)
        return output_string

    @staticmethod
    def process_json_number(input_number):
        '''Render a numeric quantity as a mixed-number string.'''
        return gen.convert_to_mixed_number(input_number)

    @staticmethod
    def process_html_ingredient_name(input_string):
        '''Decode the &deg; HTML entity into the degree sign.'''
        output_string = input_string.replace('&deg;', '°')
        return output_string

    @staticmethod
    def make_ingredient_dict_from_html(html):
        '''Parse recipe HTML into a dict keyed by ingredient id, with each
        value holding the name, number, units and the numeric value tag.'''
        soup = bs.BeautifulSoup(html, 'html.parser')
        ingredient_names_from_html = soup.find_all('span', \
            {'id': lambda L: L and L.startswith('recipeIngredient')})
        ingredient_numbers_from_html = soup.find_all('span', \
            {'id': lambda L: L and L.startswith('recipeNumber')})
        ingredient_units_from_html = soup.find_all('span', \
            {'id': lambda L: L and L.startswith('recipeUnit')})
        ingredient_dict = {}
        if ingredient_names_from_html:
            for i in range(0, len(ingredient_names_from_html)):
                ingredient_id = ingredient_names_from_html[i].get('id')
                ingredient_id = ingredient_id.replace('recipeIngredient-', '')
                ingredient_dict[ingredient_id] = \
                    {'name': ingredient_names_from_html[i].text.strip(' \n'), \
                     'number': ingredient_numbers_from_html[i].text.strip(' \n'), \
                     'units': ingredient_units_from_html[i].text.strip(' \n'), \
                     'value_tag': ingredient_numbers_from_html[i].get('value')}
        return ingredient_dict

    @staticmethod
    def convert_mixed_number_to_fraction(input_string):
        '''Convert '2', '3/4' or '1 1/2' into a (numerator, denominator)
        tuple of ints.

        Fix: the old implementation duplicated the fraction-parsing logic
        and pointlessly re-split a token that could never contain a space
        (with a latent UnboundLocalError on the unreachable branch); the
        parsing is now factored into one helper.

        Raises:
            ValueError: when the input is not one or two space-separated
                tokens.
        '''
        def _parse_fraction(token):
            # 'a/b' -> (a, b); a bare integer 'a' -> (a, 1).
            parts = [piece for piece in token.split('/') if piece]
            if len(parts) == 1:
                return (int(parts[0]), 1)
            return (int(parts[0]), int(parts[1]))

        splits = [chunk for chunk in input_string.split(' ') if chunk]
        if len(splits) not in [1, 2]:
            raise ValueError('Mixed number conversion failed due to improper form')
        if len(splits) == 1:
            return _parse_fraction(splits[0])
        whole = int(splits[0])
        numerator, denominator = _parse_fraction(splits[1])
        return (numerator + whole * denominator, denominator)

    @staticmethod
    def simplify_fraction(numer, denom):
        '''Reduce numer/denom to lowest terms, returned as an int tuple.

        Fix: the old branch ordering returned through three redundant
        paths; a gcd of 1 now short-circuits directly.
        '''
        a = numer
        b = denom
        # Euclid's algorithm.
        while b:
            a, b = b, a % b
        common_divisor = a
        if common_divisor == 1:
            return (numer, denom)
        return (int(numer / common_divisor), int(denom / common_divisor))

    def process_json_units(self, input_list, scaling_number = '1'):
        '''Return the unit string, pluralized when the scaled quantity
        exceeds one.'''
        scaling_number_fraction = self.convert_mixed_number_to_fraction(scaling_number)
        plural = (gen.string_to_float(input_list['number'][0]) * \
            scaling_number_fraction[0] / scaling_number_fraction[1]) > 1.0
        processed_units = gen.set_plural_suffix(input_list['units'][0], plural)
        return processed_units

    def make_ingredient_dict_from_link(self, root_directory, link):
        '''Fetch the local file behind `link` and parse its ingredients.'''
        html = self.get_html_from_local_file(self.get_local_file_from_url(link, root_directory))
        return self.make_ingredient_dict_from_html(html)

    def process_and_scale_json_number(self, input_number, scaling_number):
        '''Scale a quantity by `scaling_number` and return it as a reduced
        mixed-number string.'''
        mixed_number = gen.convert_to_mixed_number(input_number)
        input_number_fraction = self.convert_mixed_number_to_fraction(mixed_number)
        scaling_number_fraction = self.convert_mixed_number_to_fraction(scaling_number)
        scaled_fraction = (input_number_fraction[0] * scaling_number_fraction[0], \
            input_number_fraction[1] * scaling_number_fraction[1])
        scaled_fraction = self.simplify_fraction(scaled_fraction[0], scaled_fraction[1])
        scaled_fraction_string = str(scaled_fraction[0]) + '/' + str(scaled_fraction[1])
        return gen.convert_to_mixed_number(scaled_fraction_string)
@pytest.fixture(scope="session")
def helpers():
    """Session-scoped fixture exposing the Helpers utility class to tests."""
    return Helpers | mit |
Kazade/NeHe-Website | google_appengine/google/appengine/api/lib_config.py | 13 | 11808 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A mechanism for library configuration.
Whenever App Engine library code has the need for a user-configurable
value, it should use the following protocol:
1. Pick a prefix unique to the library module, e.g. 'mylib'.
2. Call lib_config.register(prefix, mapping) with that prefix as
the first argument and a dict mapping suffixes to default functions
as the second.
3. The register() function returns a config handle unique to this
prefix. The config handle object has attributes corresponding to
each of the suffixes given in the mapping. Call these functions
(they're not really methods even though they look like methods) to
access the user's configuration value. If the user didn't
configure a function, the default function from the mapping is
called instead.
4. Document the function name and its signature and semantics.
Users wanting to provide configuration values should create a module
named appengine_config.py in the top-level directory of their
application, and define functions as documented by various App Engine
library components in that module. To change the configuration, edit
the file and re-deploy the application. (When using the SDK, no
redeployment is required: the development server will pick up the
changes the next time it handles a request.)
Third party libraries can also use this mechanism. For casual use,
just calling the register() method with a unique prefix is okay. For
careful libraries, however, it is recommended to instantiate a new
LibConfigRegistry instance using a different module name.
Example appengine_config.py file:
from somewhere import MyMiddleWareClass
def mylib_add_middleware(app):
app = MyMiddleWareClass(app)
return app
Example library use:
from google.appengine.api import lib_config
config_handle = lib_config.register(
'mylib',
{'add_middleware': lambda app: app})
def add_middleware(app):
return config_handle.add_middleware(app)
"""
__all__ = ['DEFAULT_MODNAME',
'LibConfigRegistry',
'ConfigHandle',
'register',
'main',
]
import logging
import os
import sys
import threading
DEFAULT_MODNAME = 'appengine_config'
class LibConfigRegistry(object):
  """A registry for library configuration values."""

  def __init__(self, modname):
    """Constructor.

    Args:
      modname: The module name to be imported.

    Note: the actual import of this module is deferred until the first
    time a configuration value is requested through attribute access
    on a ConfigHandle instance.
    """
    self._modname = modname
    self._registrations = {}
    # Lazily-imported config module; None until initialize() has run.
    self._module = None
    # Reentrant lock guarding _registrations and _module.
    self._lock = threading.RLock()

  def register(self, prefix, mapping):
    """Register a set of configuration names.

    Args:
      prefix: A shared prefix for the configuration names being registered.
        If the prefix doesn't end in '_', that character is appended.
      mapping: A dict mapping suffix strings to default values.

    Returns:
      A ConfigHandle instance.

    It's okay to re-register the same prefix: the mappings are merged,
    and for duplicate suffixes the most recent registration wins.
    """
    if not prefix.endswith('_'):
      prefix += '_'
    self._lock.acquire()
    try:
      handle = self._registrations.get(prefix)
      if handle is None:
        handle = ConfigHandle(prefix, self)
        self._registrations[prefix] = handle
    finally:
      self._lock.release()
    # Merge defaults outside the registry lock; the handle has its own lock.
    handle._update_defaults(mapping)
    return handle

  def initialize(self, import_func=__import__):
    """Attempt to import the config module, if not already imported.

    This function always sets self._module to a value unequal
    to None: either the imported module (if imported successfully), or
    a dummy object() instance (if an ImportError was raised).  Other
    exceptions are *not* caught.

    When a dummy instance is used, it is also put in sys.modules.
    This allows us to detect when sys.modules was changed (as
    dev_appserver.py does when it notices source code changes) and
    re-try the __import__ in that case, while skipping it (for speed)
    if nothing has changed.

    Args:
      import_func: Used for dependency injection.
    """
    self._lock.acquire()
    try:
      if (self._module is not None and
          self._module is sys.modules.get(self._modname)):
        # Already imported and sys.modules still agrees -- nothing to do.
        return
      try:
        import_func(self._modname)
      except ImportError, err:
        if str(err) != 'No module named %s' % self._modname:
          # A real import error *inside* the config module: propagate it.
          raise
        # Module is simply absent: plant a dummy sentinel in sys.modules so
        # the check above can detect a sys.modules reset and retry later.
        self._module = object()
        sys.modules[self._modname] = self._module
      else:
        self._module = sys.modules[self._modname]
    finally:
      self._lock.release()

  def reset(self):
    """Drops the imported config module.

    If the config module has not been imported then this is a no-op.
    """
    self._lock.acquire()
    try:
      if self._module is None:
        # Module was never imported -- nothing to reset.
        return
      self._module = None
      handles = self._registrations.values()
    finally:
      self._lock.release()
    # Clear handle caches outside the registry lock to avoid nesting locks.
    for handle in handles:
      handle._clear_cache()

  def _pairs(self, prefix):
    """Generate (key, value) pairs from the config module matching prefix.

    Args:
      prefix: A prefix string ending in '_', e.g. 'mylib_'.

    Yields:
      (key, value) pairs where key is the configuration name with
      prefix removed, and value is the corresponding value.
    """
    self._lock.acquire()
    try:
      mapping = getattr(self._module, '__dict__', None)
      if not mapping:
        return
      # Snapshot the items inside the lock; iterate them outside it.
      items = mapping.items()
    finally:
      self._lock.release()
    nskip = len(prefix)
    for key, value in items:
      if key.startswith(prefix):
        yield key[nskip:], value

  def _dump(self):
    """Print info about all registrations to stdout."""
    self.initialize()
    handles = []
    self._lock.acquire()
    try:
      if not hasattr(self._module, '__dict__'):
        print 'Module %s.py does not exist.' % self._modname
      elif not self._registrations:
        print 'No registrations for %s.py.' % self._modname
      else:
        print 'Registrations in %s.py:' % self._modname
        print '-'*40
        handles = self._registrations.items()
    finally:
      self._lock.release()
    for _, handle in sorted(handles):
      handle._dump()
class ConfigHandle(object):
  """A set of configuration for a single library module or package.

  Public attributes of instances of this class are configuration
  values.  Attributes are dynamically computed (in __getattr__()) and
  cached as regular instance attributes.
  """

  # Class-level default; shadowed by an instance attribute once
  # _update_configs() has run for this handle.
  _initialized = False

  def __init__(self, prefix, registry):
    """Constructor.

    Args:
      prefix: A shared prefix for the configuration names being registered.
        It *must* end in '_'.  (This is enforced by LibConfigRegistry.)
      registry: A LibConfigRegistry instance.
    """
    assert prefix.endswith('_')
    self._prefix = prefix
    self._defaults = {}
    self._overrides = {}
    self._registry = registry
    self._lock = threading.RLock()

  def _update_defaults(self, mapping):
    """Update the default mappings.

    Args:
      mapping: A dict mapping suffix strings to default values.
    """
    self._lock.acquire()
    try:
      for key, value in mapping.iteritems():
        if key.startswith('__') and key.endswith('__'):
          # Skip dunder names (e.g. __name__) that leak in from module dicts.
          continue
        self._defaults[key] = value
      if self._initialized:
        # Already materialized once: refresh so new defaults take effect.
        self._update_configs()
    finally:
      self._lock.release()

  def _update_configs(self):
    """Update the configuration values.

    This clears the cached values, initializes the registry, and loads
    the configuration values from the config module.
    """
    self._lock.acquire()
    try:
      if self._initialized:
        self._clear_cache()
      self._registry.initialize()
      for key, value in self._registry._pairs(self._prefix):
        if key not in self._defaults:
          # User configured a name no library registered -- likely a typo.
          logging.warn('Configuration "%s" not recognized', self._prefix + key)
        else:
          self._overrides[key] = value
      self._initialized = True
    finally:
      self._lock.release()

  def _clear_cache(self):
    """Clear the cached values."""
    self._lock.acquire()
    try:
      self._initialized = False
      for key in self._defaults:
        self._overrides.pop(key, None)
        try:
          # Drop the instance attribute cached by __getattr__, if present.
          delattr(self, key)
        except AttributeError:
          pass
    finally:
      self._lock.release()

  def _dump(self):
    """Print info about this set of registrations to stdout."""
    self._lock.acquire()
    try:
      print 'Prefix %s:' % self._prefix
      if self._overrides:
        print '  Overrides:'
        for key in sorted(self._overrides):
          print '    %s = %r' % (key, self._overrides[key])
      else:
        print '  No overrides'
      if self._defaults:
        print '  Defaults:'
        for key in sorted(self._defaults):
          print '    %s = %r' % (key, self._defaults[key])
      else:
        print '  No defaults'
      print '-'*40
    finally:
      self._lock.release()

  def __getattr__(self, suffix):
    """Dynamic attribute access.

    Args:
      suffix: The attribute name.

    Returns:
      A configuration value.

    Raises:
      AttributeError if the suffix is not a registered suffix.

    The first time an attribute is referenced, this method is invoked.
    The value returned taken either from the config module or from the
    registered default.
    """
    self._lock.acquire()
    try:
      if not self._initialized:
        self._update_configs()
      if suffix in self._overrides:
        value = self._overrides[suffix]
      elif suffix in self._defaults:
        value = self._defaults[suffix]
      else:
        raise AttributeError(suffix)
      # Cache on the instance so future lookups bypass __getattr__ entirely.
      setattr(self, suffix, value)
      return value
    finally:
      self._lock.release()
# Shared module-level registry backing the convenience register() function.
_default_registry = LibConfigRegistry(DEFAULT_MODNAME)
def register(prefix, mapping):
  """Register a set of configurations with the default config module.

  Args:
    prefix: A shared prefix for the configuration names being registered.
      If the prefix doesn't end in '_', that character is appended.
    mapping: A dict mapping suffix strings to default values.

  Returns:
    A ConfigHandle instance.
  """
  handle = _default_registry.register(prefix, mapping)
  return handle
def main():
  """CGI-style request handler to dump the configuration.

  Put this in your app.yaml to enable (you can pick any URL):

  - url: /lib_config
    script: $PYTHON_LIB/google/appengine/api/lib_config.py

  Note: unless you are using the SDK, you must be admin.
  """
  if not os.getenv('SERVER_SOFTWARE', '').startswith('Dev'):
    # Production environment: restrict to admins; send anonymous users to
    # the login page, everyone else gets a 403.
    from google.appengine.api import users
    if not users.is_current_user_admin():
      if users.get_current_user() is None:
        print 'Status: 302'
        print 'Location:', users.create_login_url(os.getenv('PATH_INFO', ''))
      else:
        print 'Status: 403'
        print
        print 'Forbidden'
      return
  # The bare `print` emits the blank line CGI requires between headers and
  # the response body.
  print 'Content-type: text/plain'
  print
  _default_registry._dump()


if __name__ == '__main__':
  main()
| bsd-3-clause |
mitodl/odl-video-service | s3_sync/s3_sync.py | 1 | 9412 | #!/usr/bin/env python
"""
Read in setting values from ini file and then run s3 sync between local folder
and specified S3 bucket. Send results to local logfile & notify slack channel.
Use:
python s3_sync.py -i <settings_file.ini>
"""
import argparse
import os
import re
import subprocess
import sys
from configparser import ConfigParser, ExtendedInterpolation
try:
import requests
from logbook import Logger, RotatingFileHandler
except ImportError as err:
print("Failed to import module: ", err)
sys.exit("Make sure to pip install requests and logbook")
# Instantiate argparse to get settings_file as argument
parser = argparse.ArgumentParser(description=".")
parser.add_argument(
    "-i",
    dest="settings_file",
    required=True,
    help="path to ini file containing configs",
    metavar="FILE",
)
args = parser.parse_args()
settings_file = args.settings_file

# Read settings_file
config = ConfigParser(interpolation=ExtendedInterpolation())
try:
    config.read(settings_file)
except IOError:
    sys.exit("[-] Failed to read settings file")

# Configure logbook logging
logger = RotatingFileHandler(
    config["Logs"]["logfile"],
    max_size=int(config["Logs"]["max_size"]),
    backup_count=int(config["Logs"]["backup_count"]),
    level=int(config["Logs"]["level"]),
)
logger.push_application()
# The handler was pushed onto logbook's global stack above, so rebinding the
# name `logger` to the module's Logger here is safe.
logger = Logger(__name__)

# Get Computer name (COMPUTERNAME is Windows-specific; this script targets
# Windows lecture-capture machines).
computer_name = os.environ["COMPUTERNAME"]
def set_environment_variables():
    """
    Set some of the read settings as environment variables.
    """
    env_settings = {
        "AWS_ACCESS_KEY_ID": config["AWS"]["AWS_ACCESS_KEY_ID"],
        "AWS_SECRET_ACCESS_KEY": config["AWS"]["AWS_SECRET_ACCESS_KEY"],
        "slack_webhook_url": config["Slack"]["webhook_url"],
    }
    for name, value in env_settings.items():
        os.environ[name] = value
def verify_local_folders_exist():
    """
    Check whether the required folders exist.

    Returns:
        None when every configured folder exists; otherwise logs the
        missing folder and exits.
    """
    for folder in config["Paths"].values():
        if not os.path.exists(folder):
            # Fixes two bugs: logbook formats with {} placeholders, so the
            # old call never logged the folder name; and sys.exit() takes a
            # single argument -- passing two raised a TypeError.
            logger.error("Missing folder: {}", folder)
            sys.exit("[-] Missing folder: " + folder)
def verify_aws_cli_installed(aws_cli_binary):
    """
    Check whether AWS CLI is installed.

    Args:
        aws_cli_binary (str): absolute path to aws cli binary file.

    Returns:
        None if the binary exists; otherwise logs an error and exits.
    """
    if os.path.exists(aws_cli_binary):
        return
    logger.error("Could not find AWS CLI executable")
    sys.exit("[-] Could not find AWS CLI executable")
def verify_s3_bucket_exists(s3_bucket_name):
    """
    Check whether the S3 bucket exists and is reachable.

    Args:
        s3_bucket_name (str): The s3 bucket name.

    Exits (after logging the exception) when the head-bucket call fails
    for any reason -- missing bucket, bad credentials, or no network.
    """
    head_bucket_cmd = "aws s3api head-bucket --bucket {}".format(s3_bucket_name)
    try:
        subprocess.run(
            head_bucket_cmd,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except subprocess.SubprocessError:
        logger.exception("Failed to list specified s3 bucket: {}", s3_bucket_name)
        sys.exit("[-] Failed to list specified s3 bucket")
def check_if_file_already_synced(
    local_video_records_done_folder,
    local_video_records_synced_folder,
    local_video_records_conflict_folder,
):
    """
    Get a list of file names in local_video_records_done_folder and
    check if they exist in local_video_records_synced_folder. If a file
    exists, that means it has already been synced and it is moved to
    the local conflict folder (and Slack is notified).

    Args:
        local_video_records_done_folder (str): local folder containing video
            files ready to be copied to S3.
        local_video_records_synced_folder (str): local folder containing video
            files that have been copied to S3.
        local_video_records_conflict_folder (str): local folder containing
            video files that appeared in both done and synced folders
            simultaneously.
    """
    for file_name in os.listdir(local_video_records_done_folder):
        if os.path.isfile(local_video_records_synced_folder + "/" + file_name):
            os.replace(
                f"{local_video_records_done_folder}/{file_name}",
                f"{local_video_records_conflict_folder}/{file_name}",
            )
            # Fix: the adjacent f-strings previously joined without spaces,
            # producing a garbled Slack message ("...folder`to `..." /
            # "...`.Moved...").
            notify_slack_channel(
                f"*Failed* to copy file from `{local_video_records_done_folder}` "
                f"to `{local_video_records_synced_folder}`. "
                f"Moved following file(s) to conflict folder: {file_name}"
            )
def notify_slack_channel(slack_message):
    """
    Send a notification to the configured Slack channel.

    Args:
        slack_message (str): message to send to slack.

    Failures to deliver are logged as warnings and never raised, so a
    Slack outage cannot break the sync run.
    """
    payload = {
        "text": slack_message,
        "username": config["Slack"]["bot_username"],
        "icon_emoji": config["Slack"]["bot_emoji"],
    }
    try:
        requests.post(os.environ.get("slack_webhook_url"), json=payload)
    except (requests.exceptions.RequestException, NameError) as err:
        logger.warn("Failed to notify slack channel with following error: {}", err)
def sync_local_to_s3(
    local_video_records_done_folder, s3_bucket_name, s3_sync_result_file
):
    """
    Sync local files to specified S3 bucket.

    Args:
        local_video_records_done_folder (str): local folder containing video
            files ready to be copied to S3.
        s3_bucket_name (str): s3 bucket name.
        s3_sync_result_file (str): local file that captures the output of
            the `aws s3 sync` command for later processing.
    """
    if not os.listdir(local_video_records_done_folder):
        logger.info("Nothing to sync. {} folder empty", local_video_records_done_folder)
        # Fix: the Slack message previously read "to to sync".
        notify_slack_channel(
            f"No videos in done folder to sync "
            f"to S3 on the following lecture capture "
            f"computer: *{computer_name}*"
        )
        sys.exit("[-] Nothing to sync. Folder empty")
    # shell=True is needed for the output redirection; the interpolated
    # values come from the trusted local ini file, not from user input.
    s3_sync_cmd = 'aws s3 sync {} s3://{} > "{}"'.format(
        local_video_records_done_folder, s3_bucket_name, s3_sync_result_file
    )
    try:
        cmd_output = subprocess.run(
            s3_sync_cmd,
            check=True,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except subprocess.SubprocessError as err:
        logger.exception("Failed to sync local files to s3 bucket")
        notify_slack_channel(
            f"*Failed* to sync video(s) from done folder "
            f"to S3 on the following lecture capture "
            f"computer: *{computer_name}* \n `{err}`"
        )
        sys.exit("[-] Failed to sync local files to s3 bucket")
    logger.info("S3 sync successfully ran: {}", cmd_output)
def move_files_to_synced_folder(
    local_video_records_done_folder,
    local_video_records_synced_folder,
    s3_sync_result_file,
):
    """
    Move local files in the done folder that have already been synced to S3,
    to the local synced folder and notify slack on completion.

    Args:
        local_video_records_done_folder (str): local folder containing video
            files that should have been copied to S3.
        local_video_records_synced_folder (str): local folder containing video
            files that have already been copied to S3.
        s3_sync_result_file (str): local file containing result of s3 sync
            operation.
    """
    if not os.path.exists(s3_sync_result_file):
        # Include a {} placeholder so the filename actually appears in the
        # log (loguru silently drops extra args without a placeholder).
        logger.warning("Could not find S3 sync results file {}", s3_sync_result_file)
        sys.exit("[-] Could not find S3 sync results file")
    # Use a distinct name for the file handle; the original shadowed the
    # `file_name` loop variable below.
    with open(s3_sync_result_file) as results_file:
        s3_sync_result_data = results_file.read()
    for file_name in re.findall(r"upload:\s(?:.*\\)(.*)to", s3_sync_result_data):
        try:
            os.rename(
                f"{local_video_records_done_folder}/{file_name}",
                f"{local_video_records_synced_folder}/{file_name}",
            )
            notify_slack_channel(
                f"Successfully synced the following file from "
                f"lecture capture computer *{computer_name}* to S3: \n"
                f"`{file_name}`"
            )
        except OSError as err:
            # Add the missing placeholder so the error is actually logged.
            logger.exception("Failed to copy or remove local file: {}", err)
def main():
    """Entry point: configure the environment, run the verification checks,
    then sync the local video recordings to S3 and tidy up local folders."""
    set_environment_variables()
    verify_local_folders_exist()

    default_aws_cli = "C:/Program Files/Amazon/AWSCLI/aws.exe"
    verify_aws_cli_installed(
        config.get("Paths", "aws_cli_binary", fallback=default_aws_cli)
    )

    bucket_name = config["AWS"]["s3_bucket_name"]
    verify_s3_bucket_exists(bucket_name)

    done_folder = config["Paths"]["local_video_records_done_folder"]
    synced_folder = config["Paths"]["local_video_records_synced_folder"]
    conflict_folder = config["Paths"]["local_video_records_conflict_folder"]
    results_file = config["Logs"]["sync_results"]

    check_if_file_already_synced(done_folder, synced_folder, conflict_folder)
    sync_local_to_s3(done_folder, bucket_name, results_file)
    move_files_to_synced_folder(done_folder, synced_folder, results_file)
# Script entry point: run the full sync workflow only when executed directly.
if __name__ == "__main__":
    main()
| bsd-3-clause |
ahmed-mahran/hue | desktop/libs/notebook/src/notebook/views.py | 2 | 6418 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.lib.django_util import render, JsonResponse
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.models import Document2, Document
from spark.conf import LIVY_SERVER_SESSION_KIND
from notebook.decorators import check_document_access_permission, check_document_modify_permission
from notebook.connectors.base import Notebook, get_api
from notebook.management.commands.notebook_setup import Command
from notebook.connectors.spark_shell import SparkApi
from notebook.conf import get_interpreters
LOG = logging.getLogger(__name__)
@check_document_access_permission()
def notebook(request):
  """Render the notebook page, opening an existing notebook when an id is passed."""
  doc_id = request.GET.get('notebook')
  if doc_id:
    notebook = Notebook(document=Document2.objects.get(id=doc_id))
  else:
    notebook = Notebook()

  try:
    autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={})
  except:
    # beeswax may not be installed; autocompletion is then unavailable.
    autocomplete_base_url = ''
    LOG.exception('failed to get autocomplete base url')

  options = {
    'languages': get_interpreters(),
    'session_properties': SparkApi.PROPERTIES
  }
  return render('notebook.mako', request, {
    'notebooks_json': json.dumps([notebook.get_data()]),
    'options_json': json.dumps(options),
    'autocomplete_base_url': autocomplete_base_url,
    'is_yarn_mode': LIVY_SERVER_SESSION_KIND.get()
  })
@check_document_access_permission()
def editor(request):
  """Render the SQL editor page, seeded with a placeholder Hive snippet."""
  editor_id = request.GET.get('editor')
  if editor_id:
    editor = Notebook(document=Document2.objects.get(id=editor_id))
  else:
    editor = Notebook()
  data = editor.get_data()
  data['name'] = 'Hive SQL Editor'
  # Pre-populate the editor with one placeholder Hive snippet; the JSON blob
  # mirrors the client-side snippet model expected by editor.mako.
  data['snippets'] = json.loads('[{"id":"c111cbb4-f475-4050-c5a1-02df6c31e3d8","name":"","type":"hive","editorMode":"text/x-hiveql","statement_raw":"Example: SELECT * FROM tablename, or press CTRL + space","codemirrorSize":100,"status":"ready","properties":{"settings":[],"files":[]},"variables":[],"variableNames":[],"statement":"Example: SELECT * FROM tablename, or press CTRL + space","result":{"id":"149347d9-3ae7-8d06-4cc8-d4bce5e72dc8","type":"table","hasResultset":true,"handle":{},"meta":[],"cleanedMeta":[],"fetchedOnce":false,"startTime":"2015-07-17T20:38:21.970Z","endTime":"2015-07-17T20:38:21.970Z","executionTime":0,"cleanedNumericMeta":[],"cleanedStringMeta":[],"cleanedDateTimeMeta":[],"data":[],"logs":"","logLines":0,"errors":"","hasSomeResults":false},"showGrid":true,"showChart":false,"showLogs":false,"progress":0,"size":12,"offset":0,"isLoading":false,"klass":"snippet card card-widget","editorKlass":"editor span12","resultsKlass":"results hive","errorsKlass":"results hive alert alert-error","chartType":"bars","chartSorting":"none","chartYMulti":[],"chartData":[],"tempChartOptions":{},"isLeftPanelVisible":false,"codeVisible":true,"settingsVisible":false,"checkStatusTimeout":null}]')
  editor.data = json.dumps(data)
  autocomplete_base_url = ''
  try:
    autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={})
  except:
    # beeswax may not be installed; autocompletion is then unavailable.
    LOG.exception('failed to get autocomplete base url')
  return render('editor.mako', request, {
    'notebooks_json': json.dumps([editor.get_data()]),
    'options_json': json.dumps({
      'languages': [{"name": "Hive SQL", "type": "hive"}],
      'snippet_placeholders' : {
        'scala': _('Example: 1 + 1, or press CTRL + space'),
        'python': _('Example: 1 + 1, or press CTRL + space'),
        'impala': _('Example: SELECT * FROM tablename, or press CTRL + space'),
        'hive': _('Example: SELECT * FROM tablename, or press CTRL + space'),
        'text': _('<h2>This is a text snippet</h2>Type your text here')
      }
    }),
    'autocomplete_base_url': autocomplete_base_url,
  })
def new(request):
  """Open a fresh, unsaved notebook (same view as notebook() without an id)."""
  return notebook(request)
def notebooks(request):
  """List all notebook documents belonging to the requesting user."""
  docs = Document.objects.get_docs(request.user, Document2, extra='notebook')
  serialized = [doc.content_object.to_dict() for doc in docs]
  return render('notebooks.mako', request, {
    'notebooks_json': json.dumps(serialized, cls=JSONEncoderForHTML)
  })
@check_document_modify_permission()
def delete(request):
  """Delete each notebook listed in the POST body after a write-permission check."""
  for entry in json.loads(request.POST.get('notebooks', '[]')):
    doc2 = Document2.objects.get(uuid=entry['uuid'])
    doc = doc2.doc.get()
    doc.can_write_or_exception(request.user)
    # Remove both the Document wrapper and the Document2 payload.
    doc.delete()
    doc2.delete()
  return JsonResponse({})
@check_document_access_permission()
def copy(request):
  """Duplicate each posted notebook, appending '-copy' to the new name."""
  for entry in json.loads(request.POST.get('notebooks', '[]')):
    doc2 = Document2.objects.get(uuid=entry['uuid'])
    doc = doc2.doc.get()

    copy_name = doc2.name + '-copy'
    doc2 = doc2.copy(name=copy_name, owner=request.user)
    doc.copy(content_object=doc2, name=copy_name, owner=request.user)
  return JsonResponse({})
@check_document_access_permission()
def download(request):
  """Download a snippet's result set in the requested format (csv by default)."""
  notebook = json.loads(request.POST.get('notebook', '{}'))
  snippet = json.loads(request.POST.get('snippet', '{}'))
  file_format = request.POST.get('format', 'csv')
  return get_api(request.user, snippet).download(notebook, snippet, file_format)
def install_examples(request):
  """Install the notebook example documents for the requesting user.

  Only POST is accepted. Returns JSON with 'status' (0 on success, -1
  otherwise) and a 'message' describing any failure.
  """
  response = {'status': -1, 'message': ''}
  if request.method == 'POST':
    try:
      Command().handle(user=request.user)
      response['status'] = 0
    # 'except Exception, err' is Python 2 only; 'as' works on 2.6+ and 3.x.
    except Exception as err:
      LOG.exception(err)
      response['message'] = str(err)
  else:
    response['message'] = _('A POST request is required.')
  return JsonResponse(response)
| apache-2.0 |
bjolivot/ansible | lib/ansible/plugins/action/dellos6_config.py | 111 | 4194 | # Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.dellos6 import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for dellos6_config: renders `src` templates locally and
    optionally writes a backup of the device configuration."""

    def run(self, tmp=None, task_vars=None):
        """Run the module, templating `src` first when provided."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # to_text() works on Python 2 and 3; exc.message is Py2-only.
                return dict(failed=True, msg=to_text(exc))
        result = super(ActionModule, self).run(tmp, task_vars)
        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath
        # strip out any keys that have two leading and two trailing
        # underscore characters.  Copy the keys first: deleting while
        # iterating a dict view raises RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]
        return result

    def _get_working_path(self):
        """Return the role path when running inside a role, else the playbook dir."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write `contents` to backup/<host>_config.<timestamp>, replacing any
        previous backups for the same host, and return the new file path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # Use a context manager so the file handle is closed deterministically
        # (the original leaked the handle).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve self._task.args['src'] to file contents and render it with
        the templar, using a role-aware template search path.

        Raises:
            ValueError: if the path given in `src` cannot be found.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()
        # Bug fix: inspect the scheme of the actual src value; the original
        # passed the literal string 'src' to urlsplit().
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)
        if not os.path.exists(source):
            raise ValueError('path specified in src not found')
        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')
        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # Bug fix: the attribute name is "_block", not "_block:"; the typo
            # made this branch unreachable, so dependent role template paths
            # were never searched.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
hobarrera/todoman | todoman/interactive.py | 1 | 6168 | import urwid
from todoman import widgets
_palette = [("error", "light red", "")]
class TodoEditor:
    """
    The UI for a single todo entry.

    Presents a two-column urwid form: editable fields on the left (summary,
    description, location, dates, completion, priority) plus save/cancel
    controls, and a radio-button list selector on the right.  Call
    :meth:`edit` to run the event loop; saving copies widget values back
    onto the todo.
    """

    def __init__(self, todo, lists, formatter):
        """
        :param model.Todo todo: The todo object which will be edited.
        """
        self.current_list = todo.list
        self.todo = todo
        self.lists = list(lists)
        self.formatter = formatter
        self._loop = None  # urwid.MainLoop; set only while edit() is running
        self._status = urwid.Text("")
        self._init_basic_fields()
        self._init_list_selector()
        self._init_help_text()
        save_btn = urwid.Button("Save", on_press=self._save)
        cancel_text = urwid.Text("Hit Ctrl-C to cancel, F1 for help.")
        buttons = urwid.Columns([(8, save_btn), cancel_text], dividechars=2)
        pile_items = []
        # One form row per field: a right-aligned 13-column label next to
        # the corresponding input widget.
        for label, field in [
            ("Summary", self._summary),
            ("Description", self._description),
            ("Location", self._location),
            ("Start", self._dtstart),
            ("Due", self._due),
            ("Completed", self._completed),
            ("Priority", self._priority),
        ]:
            label = urwid.Text(label + ":", align="right")
            column = urwid.Columns([(13, label), field], dividechars=1)
            pile_items.append(("pack", column))
        grid = urwid.Pile(pile_items)
        spacer = urwid.Divider()
        self.left_column = urwid.ListBox(
            urwid.SimpleListWalker([grid, spacer, self._status, buttons])
        )
        right_column = urwid.ListBox(
            urwid.SimpleListWalker([urwid.Text("List:\n")] + self.list_selector)
        )
        self._ui = urwid.Columns([self.left_column, right_column])

    def _init_basic_fields(self):
        """Create the edit widgets for the todo's scalar fields."""
        self._summary = widgets.ExtendedEdit(
            parent=self,
            edit_text=self.todo.summary,
        )
        self._description = widgets.ExtendedEdit(
            parent=self,
            edit_text=self.todo.description,
            multiline=True,
        )
        self._location = widgets.ExtendedEdit(
            parent=self,
            edit_text=self.todo.location,
        )
        self._due = widgets.ExtendedEdit(
            parent=self,
            edit_text=self.formatter.format_datetime(self.todo.due),
        )
        self._dtstart = widgets.ExtendedEdit(
            parent=self,
            edit_text=self.formatter.format_datetime(self.todo.start),
        )
        self._completed = urwid.CheckBox("", state=self.todo.is_completed)
        self._priority = widgets.PrioritySelector(
            parent=self,
            priority=self.todo.priority,
            formatter_function=self.formatter.format_priority,
        )

    def _init_list_selector(self):
        """Build one radio button per available list, pre-selecting the
        todo's current list."""
        self.list_selector = []
        for _list in self.lists:
            urwid.RadioButton(
                self.list_selector,
                _list.name,
                state=_list == self.current_list,
                on_state_change=self._change_current_list,
                user_data=_list,
            )

    def _init_help_text(self):
        """Assemble the static help screen shown when F1 is pressed."""
        self._help_text = urwid.Text(
            "\n\n"
            "Global:\n"
            " F1: Toggle help\n"
            " Ctrl-C: Cancel\n"
            " Ctrl-S: Save (only works if not a shell shortcut already)\n"
            "\n"
            "In Textfields:\n"
            + "\n".join(" {}: {}".format(k, v) for k, v in widgets.ExtendedEdit.HELP)
            + "\n\nIn Priority Selector:\n"
            + "\n".join(
                " {}: {}".format(k, v) for k, v in widgets.PrioritySelector.HELP
            )
        )

    def _change_current_list(self, radio_button, new_state, new_list):
        """Radio-button callback: remember the newly selected list."""
        if new_state:
            self.current_list = new_list

    def _toggle_help(self):
        """Show or hide the help text at the bottom of the left column."""
        if self.left_column.body.contents[-1] is self._help_text:
            self.left_column.body.contents.pop()
        else:
            self.left_column.body.contents.append(self._help_text)
        self._loop.draw_screen()

    def set_status(self, text):
        """Display ``text`` (or an (attr, text) tuple) in the status line."""
        self._status.set_text(text)

    def edit(self):
        """Shows the UI for editing a given todo."""
        self._loop = urwid.MainLoop(
            self._ui,
            palette=_palette,
            unhandled_input=self._keypress,
            handle_mouse=False,
        )
        try:
            self._loop.run()
        except KeyboardInterrupt:
            self._loop.stop()  # Try to leave terminal in usable state
        self._loop = None

    def _save(self, btn=None):
        """Persist the edited values: show the error on failure, exit the
        main loop on success."""
        try:
            self._save_inner()
        except Exception as e:
            self.set_status(("error", str(e)))
        else:
            raise urwid.ExitMainLoop()

    def _save_inner(self):
        """Copy every widget value back onto the todo object."""
        self.todo.list = self.current_list
        self.todo.summary = self.summary
        self.todo.description = self.description
        self.todo.location = self.location
        self.todo.due = self.formatter.parse_datetime(self.due)
        self.todo.start = self.formatter.parse_datetime(self.dtstart)
        # Only flip completion state when the checkbox actually changed, so
        # an already-completed todo keeps its original completion timestamp.
        if not self.todo.is_completed and self._completed.get_state():
            self.todo.complete()
        elif self.todo.is_completed and not self._completed.get_state():
            self.todo.status = "NEEDS-ACTION"
            self.todo.completed_at = None
        self.todo.priority = self.priority
        # TODO: categories
        # TODO: comment
        # https://tools.ietf.org/html/rfc5545#section-3.8
        # geo (lat, lon)
        # RESOURCE: the main room

    def _keypress(self, key):
        """Handle keys not consumed by any widget (help toggle and save)."""
        if key.lower() == "f1":
            self._toggle_help()
        elif key == "ctrl s":
            self._save()

    @property
    def summary(self):
        """Current text of the summary field."""
        return self._summary.edit_text

    @property
    def description(self):
        """Current text of the description field."""
        return self._description.edit_text

    @property
    def location(self):
        """Current text of the location field."""
        return self._location.edit_text

    @property
    def due(self):
        """Raw (unparsed) text of the due-date field."""
        return self._due.edit_text

    @property
    def dtstart(self):
        """Raw (unparsed) text of the start-date field."""
        return self._dtstart.edit_text

    @property
    def priority(self):
        """Currently selected priority value."""
        return self._priority.priority
| isc |
splice/splice-server | src/splice/managers/upload.py | 1 | 7041 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import time
from splice.common import config, splice_server_client
from splice.common.exceptions import RequestException
from splice.common.models import ProductUsage, SpliceServer, SpliceServerTransferInfo
from logging import getLogger
_LOG = getLogger(__name__)
def upload_product_usage_data(cfg=None):
    """
    Upload product usage data and splice server metadata to each configured
    reporting server.

    @param cfg: optional argument to use a special instance of ConfigParser to determine values,
                mainly used for unit testing,
    @return: None
    """
    cfg_info = config.get_reporting_config_info(cfg)
    if not cfg_info["servers"]:
        _LOG.info("No servers are configured to upload product usage data to")
        return
    limit = None
    if cfg_info["limit_per_call"]:
        limit = cfg_info["limit_per_call"]
    for server in cfg_info["servers"]:
        # Unpack outside the try block: if a server entry were malformed, the
        # except clause below would otherwise raise a NameError (first
        # iteration) or log stale values from a previous iteration.
        (addr, port, url) = server
        try:
            _process_product_usage_upload(addr, port, url, limit)
            _process_splice_server_metadata_upload(addr, port, url)
        except Exception:
            _LOG.exception("Caught exception when processing upload to (%s, %s, %s)" % (addr, port, url))
            _LOG.info("Related configuration is: '%s'" % (cfg_info))
###
# - Internal functions below
###
def _process_splice_server_metadata_upload(addr, port, url, since=None):
    """Upload SpliceServer metadata newer than the last recorded transfer.

    Returns True on success (or when nothing needs sending), False when the
    remote upload fails.
    """
    endpoint = url + "/spliceserver/"  # remote API requires a trailing '/'
    records = list(_get_splice_server_metadata(addr, since))
    if not records:
        _LOG.info("No new splice server data to upload")
        return True
    # Records are ordered by 'updated', so the last one is the newest.
    newest_timestamp = records[-1].updated
    try:
        _LOG.info("Uploading %s SpliceServer objects to %s:%s/%s" % (len(records), addr, port, endpoint))
        splice_server_client.upload_splice_server_metadata(addr, port, endpoint, records)
    except RequestException:
        _LOG.exception("Received exception attempting to send %s records from %s to %s:%s\%s" % (len(records), newest_timestamp, addr, port, endpoint))
        return False
    _update_last_timestamp(addr, newest_timestamp, SpliceServerTransferInfo)
    return True
def _process_product_usage_upload(addr, port, url, limit, gzip_body=True):
    """
    Upload not-yet-transferred ProductUsage entries to a remote server and
    mark them as sent, logging timing for each phase.

    @param addr: address of remote server
    @param port: port of remote server
    @param url: url of remote server
    @param limit: max amount of objects to process per request
    @param gzip_body: defaults to True, will gzip the request body
    @return: True on success, False on failure
    """
    url = url + "/productusage/" #must end in '/'
    # time_a..time_f bracket the query/convert/upload/mark phases so the
    # log lines below can report per-phase durations.
    time_a = time.time()
    cursor = _get_product_usage_data(addr, limit)
    time_b = time.time()
    pu_data = list(cursor)
    time_c = time.time()
    if not pu_data:
        _LOG.info("No new product usage data to upload")
        return True
    #last_timestamp = pu_data[-1].date
    try:
        _LOG.info("Uploading %s ProductUsage entries to %s:%s/%s" % (len(pu_data), addr, port, url))
        # TODO:
        #  Examine return values and determine, what/if any objects were not successfuly uploaded.
        time_d = time.time()
        splice_server_client.upload_product_usage_data(addr, port, url, pu_data, gzip_body=gzip_body)
        time_e = time.time()
        # Mark the successfully uploaded objects as transferred
        # TODO: Update logic to account for return value from upload call
        object_ids = [x.id for x in pu_data]
        _mark_sent(object_ids, addr)
        time_f = time.time()
        _LOG.info("%s seconds to fetch/upload %s ProductUsage entries to %s:%s/%s" % (time_f-time_a, len(pu_data), addr, port, url))
        _LOG.info(" %s seconds to fetch %s ProductUsage entries, %s for initial mongo query %s seconds to convert to list" % \
            (time_c-time_a, len(pu_data), time_b-time_a, time_c-time_b))
        _LOG.info(" %s seconds to upload %s ProductUsage entries, %s seconds to update tracker" % (time_e-time_d, len(pu_data), time_f-time_e))
    # Log items unsuccessful and retry upload
    except RequestException, e:
        _LOG.exception("Received exception attempting to send %s records to %s:%s\%s" % (len(pu_data), addr, port, url))
        return False
    #_update_last_timestamp(addr, last_timestamp, ProductUsageTransferInfo)
    return True
def _mark_sent(object_ids, addr):
    """Add `addr` to the tracker set of each uploaded ProductUsage document."""
    for object_id in object_ids:
        ProductUsage.objects(id=object_id).update(add_to_set__tracker=addr)
def _unmark_sent(object_ids, addr):
    """Remove `addr` from the tracker set of each ProductUsage document."""
    for object_id in object_ids:
        ProductUsage.objects(id=object_id).update(pull__tracker=addr)
def _update_last_timestamp(addr, timestamp, transfer_cls):
    """Persist `timestamp` as the latest successful transfer time for `addr`."""
    record = transfer_cls.objects(server_hostname=addr).first()
    if not record:
        # First transfer to this host: create the tracking document.
        record = transfer_cls(server_hostname=addr)
    record.last_timestamp = timestamp
    record.save()
def _get_splice_server_metadata(addr, since=None):
    """
    Returns splice server metadata which has not yet been uploaded to 'addr'

    @param addr: remote server to upload data to
    @param since: Optional, date we want to send data from, intended for unit tests only
    @type since: datetime.datetime
    @return: queryset of SpliceServer objects ordered by their 'updated' date
    """
    transfer_info = SpliceServerTransferInfo.objects(server_hostname=addr).first()
    # Prefer the explicit 'since' override; otherwise use the recorded
    # last-transfer timestamp for this host, if any.
    cutoff = since
    if not cutoff and transfer_info:
        cutoff = transfer_info.last_timestamp
    if cutoff:
        data = SpliceServer.objects(updated__gt=cutoff)
    else:
        data = SpliceServer.objects()
    data = data.order_by("updated")
    _LOG.info("Retrieved %s items to send to %s, since last timestamp of %s" % (len(data), addr, cutoff))
    return data
def _get_product_usage_data(addr, limit):
    """
    Returns product usage data which has not yet been uploaded to 'addr'

    @param addr: remote server to upload data to
    @param limit: max amount of objects to process per request
    @return: queryset of ProductUsage objects ordered by date
    """
    #TODO:
    # - Modify query to not fetch the "tracker" field this way it is always blank
    usage_query = ProductUsage.objects(tracker__nin=[addr]).order_by("date")
    if limit:
        usage_query = usage_query.limit(limit)
    # Keep 'tracker' information private to this server
    for usage in usage_query:
        usage.tracker = []
    _LOG.info("Retrieved %s items to send to %s" % (len(usage_query), addr))
    return usage_query
| gpl-2.0 |
michaelpacer/linkchecker | linkcheck/logger/xmllog.py | 9 | 3361 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Base class for XML loggers.
"""
import xml.sax.saxutils
from . import _Logger
# Characters that must be escaped inside double-quoted XML attribute values,
# passed as the extra entity table to xml.sax.saxutils.escape().
# NOTE: the previous table had been corrupted by HTML-entity decoding (its
# values were the raw characters and the '"' entry was not valid Python).
xmlattr_entities = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    "\"": "&quot;",
}
def xmlquote (s):
    """Escape the characters &, < and > for inclusion in XML text content."""
    escaped = xml.sax.saxutils.escape(s)
    return escaped
def xmlquoteattr (s):
    """
    Quote XML attribute, ready for inclusion with double quotes.

    Escapes &, < and > (handled unconditionally by saxutils.escape) plus the
    double quote itself.
    """
    # The module-level xmlattr_entities table was corrupted by HTML-entity
    # decoding, so pass the one extra mapping needed directly;
    # saxutils.escape() always escapes &, < and > on its own.
    return xml.sax.saxutils.escape(s, {"\"": "&quot;"})
class _XMLLogger (_Logger):
    """Base class for XML output; easy to parse with any XML tool."""

    def __init__ (self, **kwargs):
        """
        Initialize file output and set up the indentation unit and the
        current element nesting level.
        """
        args = self.get_args(kwargs)
        super(_XMLLogger, self).__init__(**args)
        self.init_fileoutput(args)
        self.indent = u" "
        self.level = 0

    def comment (self, s, **args):
        """
        Write XML comment.
        """
        self.write(u"<!-- ")
        self.write(s, **args)
        self.writeln(u" -->")

    def xml_start_output (self):
        """
        Write the XML declaration and, if enabled, the intro comment.
        """
        self.writeln(u'<?xml version="1.0" encoding="%s"?>' %
                     xmlquoteattr(self.get_charset_encoding()))
        if self.has_part("intro"):
            self.write_intro()
            self.writeln()

    def xml_end_output (self):
        """
        Write end of checking info as xml comment.
        """
        if self.has_part("outro"):
            self.write_outro()

    def xml_starttag (self, name, attrs=None):
        """
        Write XML start tag and increase the nesting level.
        """
        self.write(self.indent*self.level)
        self.write(u"<%s" % xmlquote(name))
        if attrs:
            # Use distinct loop variables so the tag name is not shadowed
            # (consistent with xml_tag below).
            for aname, avalue in attrs.items():
                args = (xmlquote(aname), xmlquoteattr(avalue))
                self.write(u' %s="%s"' % args)
        self.writeln(u">")
        self.level += 1

    def xml_endtag (self, name):
        """
        Write XML end tag and decrease the nesting level.
        """
        self.level -= 1
        assert self.level >= 0
        self.write(self.indent*self.level)
        self.writeln(u"</%s>" % xmlquote(name))

    def xml_tag (self, name, content, attrs=None):
        """
        Write a complete XML element with text content on a single line.
        """
        self.write(self.indent*self.level)
        self.write(u"<%s" % xmlquote(name))
        if attrs:
            for aname, avalue in attrs.items():
                args = (xmlquote(aname), xmlquoteattr(avalue))
                self.write(u' %s="%s"' % args)
        self.writeln(u">%s</%s>" % (xmlquote(content), xmlquote(name)))
| gpl-2.0 |
Bulochkin/tensorflow_pack | tensorflow/python/kernel_tests/pool_test.py | 70 | 14153 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unified pooling functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def pool_direct_single_axis(
    input,  # pylint: disable=redefined-builtin
    axis,
    window_size,
    pooling_type,
    padding,
    dilation_rate,
    stride):
  """Reference (numpy) pooling along one axis; for testing only.

  Args:
    input: numpy array.
    axis: axis along which to perform pooling.
    window_size: int >= 1. Size of pooling window within axis.
    pooling_type: either "MAX" or "AVG".
    padding: either "SAME" or "VALID".
    dilation_rate: int >= 1. Stride at which to sample input within a window.
    stride: int >= 1. Stride at which to generate output positions.

  Returns:
    pooled numpy array with the same rank as `input`.

  Raises:
    ValueError: if `padding` or `pooling_type` is unsupported.
  """
  effective_window = (window_size - 1) * dilation_rate + 1
  num_elements = input.shape[axis]
  if padding == "SAME":
    num_outputs = int(math.ceil(num_elements / stride))
    pad_total = max(
        0, (num_outputs - 1) * stride + effective_window - num_elements)
    pad_before = pad_total // 2
  elif padding == "VALID":
    num_outputs = int(
        math.ceil((num_elements - effective_window + 1) / stride))
    pad_before = 0
  else:
    raise ValueError("Unsupported padding type: %r" % (padding,))

  out_shape = input.shape[:axis] + (num_outputs,) + input.shape[axis + 1:]
  output = np.zeros(out_shape, input.dtype)
  leading = (np.s_[:],) * axis
  if pooling_type == "MAX":
    reduce_fn = np.max
  elif pooling_type == "AVG":
    reduce_fn = np.mean
  else:
    raise ValueError("Unsupported pooling type: %r" % (pooling_type,))
  for out_pos in range(num_outputs):
    start = out_pos * stride - pad_before
    end = min(start + effective_window, num_elements)
    if start < 0:
      # Skip samples that fall in the (virtual) left padding.
      start += dilation_rate
    window = np.s_[start:end:dilation_rate]
    output[leading + (out_pos,)] = reduce_fn(
        input[leading + (window,)], axis=axis)
  return output
def pool_direct(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,
    dilation_rate,
    strides,
    data_format=None):
  """Reference (numpy) N-dimensional pooling; for testing only.

  Applies pool_direct_single_axis once per spatial dimension.  See
  tensorflow.nn.pool for the meaning of the arguments.

  Args:
    input: numpy array of rank N+2.
    window_shape: Sequence of N ints >= 1.
    pooling_type: either "MAX" or "AVG".
    padding: either "SAME" or "VALID".
    dilation_rate: Sequence of N ints >= 1.
    strides: Sequence of N ints >= 1.
    data_format: If specified and starts with "NC", indicates that second
      dimension, rather than the last dimension, specifies the channel.

  Returns:
    pooling output array of rank N+2.

  Raises:
    ValueError: if arguments are invalid.
  """
  channels_first = data_format is not None and data_format.startswith("NC")
  spatial_start_dim = 2 if channels_first else 1
  output = input
  for dim, size in enumerate(window_shape):
    output = pool_direct_single_axis(
        input=output,
        axis=dim + spatial_start_dim,
        window_size=size,
        pooling_type=pooling_type,
        padding=padding,
        dilation_rate=dilation_rate[dim],
        stride=strides[dim])
  return output
class PoolingTest(test.TestCase):
def _test(self, input_shape, **kwargs):
# Use negative numbers to make sure there isn't any zero padding getting
# used.
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
y1 = pool_direct(input=x, **kwargs)
y2 = nn_ops.pool(input=x, **kwargs)
self.assertAllClose(y1, y2.eval(), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
self._test(
input_shape=[1, 1, 10, 1],
window_shape=[1, 3],
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=[1, 2])
  def testPool1D(self):
    """Sweep 1-D pooling over paddings, pooling types, input shapes, window
    sizes, dilation rates (VALID only) and strides against pool_direct."""
    with self.test_session():
      for padding in ["SAME", "VALID"]:
        for pooling_type in ["MAX", "AVG"]:
          for input_shape in [[2, 9, 2], [2, 10, 2]]:
            for window_shape in [[1], [2], [3]]:
              if padding != "SAME":
                # Dilation rates > 1 are only exercised with VALID padding.
                for dilation_rate in [[1], [2], [3]]:
                  self._test(
                      input_shape=input_shape,
                      window_shape=window_shape,
                      padding=padding,
                      pooling_type=pooling_type,
                      dilation_rate=dilation_rate,
                      strides=[1])
              for strides in [[1], [2], [3]]:
                # Strides larger than the window are not supported.
                if np.any(np.array(strides) > window_shape):
                  continue
                self._test(
                    input_shape=input_shape,
                    window_shape=window_shape,
                    padding=padding,
                    pooling_type=pooling_type,
                    dilation_rate=[1],
                    strides=strides)
  def testPool2D(self):
    """Sweep 2-D pooling over paddings, pooling types, input shapes, window
    shapes, dilation rates (VALID only) and strides against pool_direct."""
    with self.test_session():
      for padding in ["SAME", "VALID"]:
        for pooling_type in ["MAX", "AVG"]:
          for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
            for window_shape in [[1, 1], [2, 1], [2, 3]]:
              if padding != "SAME":
                # Dilation rates > 1 are only exercised with VALID padding.
                for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
                  self._test(
                      input_shape=input_shape,
                      window_shape=window_shape,
                      padding=padding,
                      pooling_type=pooling_type,
                      dilation_rate=dilation_rate,
                      strides=[1, 1])
              for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
                # Strides larger than the window are not supported.
                if np.any(np.array(strides) > window_shape):
                  continue
                self._test(
                    input_shape=input_shape,
                    window_shape=window_shape,
                    padding=padding,
                    pooling_type=pooling_type,
                    dilation_rate=[1, 1],
                    strides=strides)
def testPool3D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
[2, 3, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[1],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[2],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2],
dilation_rate=[1, 1],
data_format="NCHW")
self._test(
input_shape=[2, 2, 7, 5, 3],
window_shape=[2, 2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2, 1],
dilation_rate=[1, 1, 1],
data_format="NCDHW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding="VALID",
pooling_type="MAX",
strides=[1, 1],
dilation_rate=[2, 2],
data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
output = nn_ops.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error(
[x], [input_shape], output, y_shape, x_init_value=[x_val])
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
def testGradient1D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testGradient2D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testGradient3D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
# Run the pooling test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
crazy-cat/incubator-mxnet | example/recommenders/matrix_fact.py | 45 | 1993 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import mxnet as mx
import numpy as np
import mxnet.notebook.callback
import logging
logging.basicConfig(level=logging.DEBUG)
def RMSE(label, pred):
    """Return the root-mean-square error between labels and predictions.

    Args:
        label: sequence of ground-truth values.
        pred: numpy array of predictions; flattened before comparison.

    Returns:
        float: sqrt(mean((label - pred)**2)).

    Raises:
        ZeroDivisionError: if `label` is empty (unchanged from before).
        IndexError: if `pred` has fewer elements than `label`.
    """
    pred = pred.flatten()
    # Index by position rather than zip() so a too-short `pred` still fails
    # loudly instead of being silently truncated.
    total = 0.0
    for i in range(len(label)):
        diff = label[i] - pred[i]
        total += diff * diff
    # The original kept a separate float counter `n`; it always equaled
    # len(label), so use that directly.
    return math.sqrt(total / len(label))
def train(network, data_pair, num_epoch, learning_rate, optimizer='sgd', opt_args=None, ctx=[mx.gpu(0)]):
    """Train `network` on a (train, eval) data pair and plot a live curve.

    Args:
        network: mxnet symbol to train.
        data_pair: 2-tuple of (training data iter, evaluation data iter).
        num_epoch: number of training epochs.
        learning_rate: optimizer learning rate.
        optimizer: optimizer name; 'sgd' gets momentum=0.9 by default when
            no `opt_args` are supplied.
        opt_args: optional dict of extra optimizer kwargs.
        ctx: mxnet device context list (default GPU 0).

    Returns:
        The LiveLearningCurve callback object after fitting.
    """
    np.random.seed(123)  # Fix random seed for consistent demos
    mx.random.seed(123)  # Fix random seed for consistent demos
    if not opt_args:
        opt_args = {}
        # Only apply the momentum default when the caller gave no options;
        # equivalent to the old `optimizer=='sgd' and (not opt_args)` test.
        if optimizer == 'sgd':
            opt_args['momentum'] = 0.9
    model = mx.model.FeedForward(
        ctx = ctx,
        symbol = network,
        num_epoch = num_epoch,
        optimizer = optimizer,
        learning_rate = learning_rate,
        wd = 1e-4,
        **opt_args
    )
    # Renamed from `train, test`: those locals shadowed this function's own
    # name and a common module-level name.
    train_data, eval_data = data_pair
    lc = mxnet.notebook.callback.LiveLearningCurve('RMSE', 1)
    model.fit(X = train_data,
              eval_data = eval_data,
              eval_metric = RMSE,
              **mxnet.notebook.callback.args_wrapper(lc)
              )
    return lc
| apache-2.0 |
ramanajee/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/user_unittest.py | 124 | 7300 | # Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user import User
class UserTest(unittest.TestCase):
    """Tests for webkitpy's User prompt/confirm helpers.

    Terminal interaction is simulated by passing a mock ``raw_input``
    callable into the User APIs, so no real console I/O happens.
    """

    # Canned "valid" answer returned by the mock input functions.
    example_user_response = "example user response"

    def test_prompt_repeat(self):
        # The mock returns None (treated as invalid input) until the counter
        # hits zero, then returns the valid canned response; prompt() should
        # keep retrying and eventually return it.
        self.repeatsRemaining = 2

        def mock_raw_input(message):
            self.repeatsRemaining -= 1
            if not self.repeatsRemaining:
                return UserTest.example_user_response
            return None
        # NOTE: repeat= is evaluated before the mock runs, so it is 2 here.
        self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response)

    def test_prompt_when_exceeded_repeats(self):
        # The mock never returns valid input, so prompt() must give up after
        # the allowed repeats and return None.
        self.repeatsRemaining = 2

        def mock_raw_input(message):
            self.repeatsRemaining -= 1
            return None
        self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None)

    def test_prompt_with_multiple_lists(self):
        # Drives prompt_with_multiple_lists with scripted inputs, asserting
        # both the returned selection and the exact menu printed to stdout.
        def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
            def mock_raw_input(message):
                return inputs.pop(0)
            output_capture = OutputCapture()
            actual_result = output_capture.assert_outputs(
                self,
                User.prompt_with_multiple_lists,
                args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
                kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
                expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
            self.assertEqual(actual_result, expected_result)
            # All scripted inputs must have been consumed.
            self.assertEqual(len(inputs), 0)

        # Single-choice selection, including recovery from bad input.
        run_prompt_test(["1"], "foo")
        run_prompt_test(["badinput", "2"], "bar")
        run_prompt_test(["3"], "foobar")
        run_prompt_test(["4"], "barbaz")
        run_prompt_test(["5"], "foobaz")

        # Multi-choice: comma lists, ranges, whitespace, "all", and empty
        # input (which selects everything).
        run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
        run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
        # A reversed range ("2-1") contributes nothing; only "3" is selected.
        run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
        run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
        run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
        run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
        run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)

    def test_prompt_with_list(self):
        # Same pattern as above, but for the single-list prompt variant.
        def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
            def mock_raw_input(message):
                return inputs.pop(0)
            output_capture = OutputCapture()
            actual_result = output_capture.assert_outputs(
                self,
                User.prompt_with_list,
                args=["title", ["foo", "bar"]],
                kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
                expected_stdout="title\n 1. foo\n 2. bar\n")
            self.assertEqual(actual_result, expected_result)
            self.assertEqual(len(inputs), 0)

        run_prompt_test(["1"], "foo")
        run_prompt_test(["badinput", "2"], "bar")
        run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True)
        run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)

    def test_confirm(self):
        # Each case: (expected prompt message, expected result) against
        # (default mode, simulated user keystroke).
        test_cases = (
            (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
            (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
            (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
            (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
            (("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
        )
        for test_case in test_cases:
            expected, inputs = test_case

            def mock_raw_input(message):
                # The prompt text itself must match the expected message.
                self.assertEqual(expected[0], message)
                return inputs[1]

            result = User().confirm(default=inputs[0],
                                    raw_input=mock_raw_input)
            self.assertEqual(expected[1], result)

    def test_warn_if_application_is_xcode(self):
        # Only an exact, case-sensitive "Xcode" application name (or a path
        # ending in Xcode.app) should trigger the warning.
        output = OutputCapture()
        user = User()
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"])
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"])
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"])  # case sensitive matching
        xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n"
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning)
        output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning)
| bsd-3-clause |
jymannob/Sick-Beard | lib/requests/packages/chardet/langthaimodel.py | 235 | 11298 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = ( \
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Aggregate single-byte charset model for Thai text in the TIS-620 encoding,
# tying the char-to-order map and the bigram precedence matrix together.
TIS620ThaiModel = { \
  'charToOrderMap': TIS620CharToOrderMap,
  'precedenceMatrix': ThaiLangModel,
  # Fraction of observed bigrams covered by the top 512 sequences (see the
  # statistics comment above ThaiLangModel).
  'mTypicalPositiveRatio': 0.926386,
  # Python 2 era: `constants` provides False for very old interpreters.
  'keepEnglishLetter': constants.False,
  'charsetName': "TIS-620"
}
| gpl-3.0 |
gsehub/edx-platform | common/lib/xmodule/xmodule/tests/test_annotatable_module.py | 13 | 5963 | """Module annotatable tests"""
import unittest
from lxml import etree
from mock import Mock
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.annotatable_module import AnnotatableModule
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from . import get_test_system
class AnnotatableModuleTestCase(unittest.TestCase):
    """Tests for AnnotatableModule's annotation parsing and rendering."""

    shard = 1

    # Sample <annotatable> XML with five <annotation> elements exercising
    # the title/body/problem/highlight attributes.
    sample_xml = '''
        <annotatable display_name="Iliad">
            <instructions>Read the text.</instructions>
            <p>
                <annotation body="first">Sing</annotation>,
                <annotation title="goddess" body="second">O goddess</annotation>,
                <annotation title="anger" body="third" highlight="blue">the anger of Achilles son of Peleus</annotation>,
                that brought <i>countless</i> ills upon the Achaeans. Many a brave soul did it send
                hurrying down to Hades, and many a hero did it yield a prey to dogs and
                <div style="font-weight:bold"><annotation body="fourth" problem="4">vultures</annotation>, for so were the counsels
                of Jove fulfilled from the day on which the son of Atreus, king of men, and great
                Achilles, first fell out with one another.</div>
            </p>
            <annotation title="footnote" body="the end">The Iliad of Homer by Samuel Butler</annotation>
        </annotatable>
    '''

    def setUp(self):
        """Build an AnnotatableModule around sample_xml with mocked runtime."""
        super(AnnotatableModuleTestCase, self).setUp()
        self.annotatable = AnnotatableModule(
            Mock(),
            get_test_system(),
            DictFieldData({'data': self.sample_xml}),
            ScopeIds(None, None, None, BlockUsageLocator(CourseLocator('org', 'course', 'run'), 'category', 'name'))
        )

    def test_annotation_data_attr(self):
        # title/body/problem attributes become data-* attributes; the
        # '_delete' entries mark the source XML attributes to strip.
        el = etree.fromstring('<annotation title="bar" body="foo" problem="0">test</annotation>')
        expected_attr = {
            'data-comment-body': {'value': 'foo', '_delete': 'body'},
            'data-comment-title': {'value': 'bar', '_delete': 'title'},
            'data-problem-id': {'value': '0', '_delete': 'problem'}
        }
        actual_attr = self.annotatable._get_annotation_data_attr(0, el)
        self.assertIsInstance(actual_attr, dict)
        self.assertDictEqual(expected_attr, actual_attr)

    def test_annotation_class_attr_default(self):
        # Without a highlight attribute, only the base CSS classes appear.
        xml = '<annotation title="x" body="y" problem="0">test</annotation>'
        el = etree.fromstring(xml)
        expected_attr = {'class': {'value': 'annotatable-span highlight'}}
        actual_attr = self.annotatable._get_annotation_class_attr(0, el)
        self.assertIsInstance(actual_attr, dict)
        self.assertDictEqual(expected_attr, actual_attr)

    def test_annotation_class_attr_with_valid_highlight(self):
        # Each supported color adds a highlight-<color> class and marks the
        # highlight attribute for deletion.
        xml = '<annotation title="x" body="y" problem="0" highlight="{highlight}">test</annotation>'
        for color in self.annotatable.highlight_colors:
            el = etree.fromstring(xml.format(highlight=color))
            value = 'annotatable-span highlight highlight-{highlight}'.format(highlight=color)
            expected_attr = {
                'class': {
                    'value': value,
                    '_delete': 'highlight'
                }
            }
            actual_attr = self.annotatable._get_annotation_class_attr(0, el)
            self.assertIsInstance(actual_attr, dict)
            self.assertDictEqual(expected_attr, actual_attr)

    def test_annotation_class_attr_with_invalid_highlight(self):
        # Unknown colors fall back to the base classes but still delete the
        # highlight attribute.
        xml = '<annotation title="x" body="y" problem="0" highlight="{highlight}">test</annotation>'
        for invalid_color in ['rainbow', 'blink', 'invisible', '', None]:
            el = etree.fromstring(xml.format(highlight=invalid_color))
            expected_attr = {
                'class': {
                    'value': 'annotatable-span highlight',
                    '_delete': 'highlight'
                }
            }
            actual_attr = self.annotatable._get_annotation_class_attr(0, el)
            self.assertIsInstance(actual_attr, dict)
            self.assertDictEqual(expected_attr, actual_attr)

    def test_render_annotation(self):
        # An <annotation> element is rewritten in place into a <span> with
        # the data-* and class attributes from the helpers above.
        expected_html = '<span class="annotatable-span highlight highlight-yellow" data-comment-title="x" data-comment-body="y" data-problem-id="0">z</span>'
        expected_el = etree.fromstring(expected_html)
        actual_el = etree.fromstring('<annotation title="x" body="y" problem="0" highlight="yellow">z</annotation>')
        self.annotatable._render_annotation(0, actual_el)
        self.assertEqual(expected_el.tag, actual_el.tag)
        self.assertEqual(expected_el.text, actual_el.text)
        self.assertDictEqual(dict(expected_el.attrib), dict(actual_el.attrib))

    def test_render_content(self):
        # Rendering sample_xml should wrap everything in a div and convert
        # all five annotations to annotatable-span elements.
        content = self.annotatable._render_content()
        el = etree.fromstring(content)
        self.assertEqual('div', el.tag, 'root tag is a div')
        expected_num_annotations = 5
        actual_num_annotations = el.xpath('count(//span[contains(@class,"annotatable-span")])')
        self.assertEqual(expected_num_annotations, actual_num_annotations, 'check number of annotations')

    def test_get_html(self):
        # The template context must expose the keys the HTML template needs.
        context = self.annotatable.get_html()
        for key in ['display_name', 'element_id', 'content_html', 'instructions_html']:
            self.assertIn(key, context)

    def test_extract_instructions(self):
        # <instructions> is extracted and re-tagged as a <div>; absent
        # instructions yield None.
        xmltree = etree.fromstring(self.sample_xml)
        expected_xml = u"<div>Read the text.</div>"
        actual_xml = self.annotatable._extract_instructions(xmltree)
        self.assertIsNotNone(actual_xml)
        self.assertEqual(expected_xml.strip(), actual_xml.strip())

        xmltree = etree.fromstring('<annotatable>foo</annotatable>')
        actual = self.annotatable._extract_instructions(xmltree)
        self.assertIsNone(actual)
| agpl-3.0 |
zahari/samba | source4/dsdb/tests/python/sec_descriptor.py | 27 | 106756 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import sys
import os
import base64
import re
import random
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import SubunitOptions, TestProgram
import samba.getopt as options
# Some error messages that are being tested
from ldb import SCOPE_SUBTREE, SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT
# For running the test unit
from samba.ndr import ndr_pack, ndr_unpack
from samba.dcerpc import security
from samba import gensec, sd_utils
from samba.samdb import SamDB
from samba.credentials import Credentials, DONT_USE_KERBEROS
from samba.auth import system_session
from samba.dsdb import DS_DOMAIN_FUNCTION_2008
from samba.dcerpc.security import (
SECINFO_OWNER, SECINFO_GROUP, SECINFO_DACL, SECINFO_SACL)
import samba.tests
from samba.tests import delete_force
# ---- Command-line handling -------------------------------------------------
# Standard samba test-script plumbing: samba/version/credentials/subunit
# option groups, then a mandatory <host> positional argument.
parser = optparse.OptionParser("sec_descriptor.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()

if len(args) < 1:
    parser.print_usage()
    sys.exit(1)

# Target server URL for all LDB connections made by the tests below.
host = args[0]

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
# Request a sealed (encrypted) connection for the admin credentials.
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
#
# Tests start here
#
class DescriptorTests(samba.tests.TestCase):
    """Shared fixture and helpers for the security-descriptor test classes.

    Connects to the target DC as admin in setUp and provides helpers to
    build DNs, create schema/configuration objects with an optional
    nTSecurityDescriptor, and open connections as other users.

    NOTE: the LDIF template strings below intentionally start at column 0 —
    LDIF attribute lines must not be indented.
    """

    def get_users_domain_dn(self, name):
        # DN of a user object inside the default CN=Users container.
        return "CN=%s,CN=Users,%s" % (name, self.base_dn)

    def get_unique_schema_class_name(self):
        # Try random candidate names until one is confirmed absent from the
        # schema NC (search raises ERR_NO_SUCH_OBJECT for a free name).
        while True:
            class_name = "test-class%s" % random.randint(1,100000)
            class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
            try:
                self.ldb_admin.search(base=class_dn, attrs=["*"])
            except LdbError, (num, _):
                self.assertEquals(num, ERR_NO_SUCH_OBJECT)
                return class_name

    def create_schema_class(self, _ldb, object_dn, desc=None):
        """Add a classSchema object; `desc` may be an SDDL string or a
        security.descriptor (NDR-packed and base64 encoded)."""
        ldif = """
dn: """ + object_dn + """
objectClass: classSchema
objectCategory: CN=Class-Schema,""" + self.schema_dn + """
defaultObjectCategory: """ + object_dn + """
distinguishedName: """ + object_dn + """
governsID: 1.2.840.""" + str(random.randint(1,100000)) + """.1.5.9939
instanceType: 4
objectClassCategory: 1
subClassOf: organizationalPerson
systemFlags: 16
rDNAttID: cn
systemMustContain: cn
systemOnly: FALSE
"""
        if desc:
            assert(isinstance(desc, str) or isinstance(desc, security.descriptor))
            if isinstance(desc, str):
                # Single colon: SDDL string form.
                ldif += "nTSecurityDescriptor: %s" % desc
            elif isinstance(desc, security.descriptor):
                # Double colon: base64-encoded NDR blob.
                ldif += "nTSecurityDescriptor:: %s" % base64.b64encode(ndr_pack(desc))
        _ldb.add_ldif(ldif)

    def create_configuration_container(self, _ldb, object_dn, desc=None):
        """Add a container object in the configuration NC, optionally with
        an explicit security descriptor (same `desc` forms as above)."""
        ldif = """
dn: """ + object_dn + """
objectClass: container
objectCategory: CN=Container,""" + self.schema_dn + """
showInAdvancedViewOnly: TRUE
instanceType: 4
"""
        if desc:
            assert(isinstance(desc, str) or isinstance(desc, security.descriptor))
            if isinstance(desc, str):
                ldif += "nTSecurityDescriptor: %s" % desc
            elif isinstance(desc, security.descriptor):
                ldif += "nTSecurityDescriptor:: %s" % base64.b64encode(ndr_pack(desc))
        _ldb.add_ldif(ldif)

    def create_configuration_specifier(self, _ldb, object_dn, desc=None):
        """Add a displaySpecifier object, optionally with an explicit
        security descriptor (same `desc` forms as above)."""
        ldif = """
dn: """ + object_dn + """
objectClass: displaySpecifier
showInAdvancedViewOnly: TRUE
"""
        if desc:
            assert(isinstance(desc, str) or isinstance(desc, security.descriptor))
            if isinstance(desc, str):
                ldif += "nTSecurityDescriptor: %s" % desc
            elif isinstance(desc, security.descriptor):
                ldif += "nTSecurityDescriptor:: %s" % base64.b64encode(ndr_pack(desc))
        _ldb.add_ldif(ldif)

    def get_ldb_connection(self, target_username, target_password):
        """Open a sealed SamDB connection to `host` as the given user,
        inheriting domain/realm/workstation from the admin credentials."""
        creds_tmp = Credentials()
        creds_tmp.set_username(target_username)
        creds_tmp.set_password(target_password)
        creds_tmp.set_domain(creds.get_domain())
        creds_tmp.set_realm(creds.get_realm())
        creds_tmp.set_workstation(creds.get_workstation())
        creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
                                      | gensec.FEATURE_SEAL)
        creds_tmp.set_kerberos_state(DONT_USE_KERBEROS) # kinit is too expensive to use in a tight loop
        ldb_target = SamDB(url=host, credentials=creds_tmp, lp=lp)
        return ldb_target

    def setUp(self):
        super(DescriptorTests, self).setUp()
        # NOTE(review): `ldb_options` is not defined in this chunk; it is
        # presumably a module-level list set elsewhere in the file — verify.
        self.ldb_admin = SamDB(host, credentials=creds, session_info=system_session(lp), lp=lp,
                               options=ldb_options)
        # Cache the naming contexts and domain SID used throughout the tests.
        self.base_dn = self.ldb_admin.domain_dn()
        self.configuration_dn = self.ldb_admin.get_config_basedn().get_linearized()
        self.schema_dn = self.ldb_admin.get_schema_basedn().get_linearized()
        self.domain_sid = security.dom_sid(self.ldb_admin.get_domain_sid())
        self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
        print "baseDN: %s" % self.base_dn
################################################################################################
## Tests for DOMAIN
# Default descriptor tests #####################################################################
class OwnerGroupDescriptorTests(DescriptorTests):
def deleteAll(self):
    """Best-effort removal of every object the tests may have created."""
    # The eight accounts provisioned by setUp().
    for idx in range(1, 9):
        delete_force(self.ldb_admin, self.get_users_domain_dn("testuser%d" % idx))
    # DOMAIN partition leftovers (children before parents).
    delete_force(self.ldb_admin, self.get_users_domain_dn("test_domain_group1"))
    delete_force(self.ldb_admin, "CN=test_domain_user1,OU=test_domain_ou1," + self.base_dn)
    delete_force(self.ldb_admin, "OU=test_domain_ou2,OU=test_domain_ou1," + self.base_dn)
    delete_force(self.ldb_admin, "OU=test_domain_ou1," + self.base_dn)
    # SCHEMA partition: nothing to clean up.
    # CONFIGURATION partition leftovers.
    delete_force(self.ldb_admin, "CN=test-specifier1,CN=test-container1,CN=DisplaySpecifiers,"
                 + self.configuration_dn)
    delete_force(self.ldb_admin, "CN=test-container1,CN=DisplaySpecifiers," + self.configuration_dn)
def setUp(self):
    """Provision the eight test accounts and their group memberships, build
    the owner/group expectation table, and detect the DC functional level."""
    super(OwnerGroupDescriptorTests, self).setUp()
    self.deleteAll()
    ### Create users
    # User 1 - Enterprise Admins
    self.ldb_admin.newuser("testuser1", "samba123@")
    # User 2 - Domain Admins
    self.ldb_admin.newuser("testuser2", "samba123@")
    # User 3 - Schema Admins
    self.ldb_admin.newuser("testuser3", "samba123@")
    # User 4 - regular user
    self.ldb_admin.newuser("testuser4", "samba123@")
    # User 5 - Enterprise Admins and Domain Admins
    self.ldb_admin.newuser("testuser5", "samba123@")
    # User 6 - Enterprise Admins, Domain Admins, Schema Admins
    self.ldb_admin.newuser("testuser6", "samba123@")
    # User 7 - Domain Admins and Schema Admins
    self.ldb_admin.newuser("testuser7", "samba123@")
    # User 8 - Enterprise Admins and Schema Admins
    self.ldb_admin.newuser("testuser8", "samba123@")
    self.ldb_admin.add_remove_group_members("Enterprise Admins",
                                            ["testuser1", "testuser5", "testuser6", "testuser8"],
                                            add_members_operation=True)
    self.ldb_admin.add_remove_group_members("Domain Admins",
                                            ["testuser2","testuser5","testuser6","testuser7"],
                                            add_members_operation=True)
    self.ldb_admin.add_remove_group_members("Schema Admins",
                                            ["testuser3","testuser6","testuser7","testuser8"],
                                            add_members_operation=True)
    # Expectation table: key is the test number (test_1XX -> "1XX"), value is
    # the owner/group prefix of the resulting SDDL; "%s" is the creator's SID.
    self.results = {
        # msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
        "ds_behavior_win2003" : {
            "100" : "O:EAG:DU",
            "101" : "O:DAG:DU",
            "102" : "O:%sG:DU",
            "103" : "O:%sG:DU",
            "104" : "O:DAG:DU",
            "105" : "O:DAG:DU",
            "106" : "O:DAG:DU",
            "107" : "O:EAG:DU",
            "108" : "O:DAG:DA",
            "109" : "O:DAG:DA",
            "110" : "O:%sG:DA",
            "111" : "O:%sG:DA",
            "112" : "O:DAG:DA",
            "113" : "O:DAG:DA",
            "114" : "O:DAG:DA",
            "115" : "O:DAG:DA",
            "130" : "O:EAG:DU",
            "131" : "O:DAG:DU",
            "132" : "O:SAG:DU",
            "133" : "O:%sG:DU",
            "134" : "O:EAG:DU",
            "135" : "O:SAG:DU",
            "136" : "O:SAG:DU",
            "137" : "O:SAG:DU",
            "138" : "O:DAG:DA",
            "139" : "O:DAG:DA",
            "140" : "O:%sG:DA",
            "141" : "O:%sG:DA",
            "142" : "O:DAG:DA",
            "143" : "O:DAG:DA",
            "144" : "O:DAG:DA",
            "145" : "O:DAG:DA",
            "160" : "O:EAG:DU",
            "161" : "O:DAG:DU",
            "162" : "O:%sG:DU",
            "163" : "O:%sG:DU",
            "164" : "O:EAG:DU",
            "165" : "O:EAG:DU",
            "166" : "O:DAG:DU",
            "167" : "O:EAG:DU",
            "168" : "O:DAG:DA",
            "169" : "O:DAG:DA",
            "170" : "O:%sG:DA",
            "171" : "O:%sG:DA",
            "172" : "O:DAG:DA",
            "173" : "O:DAG:DA",
            "174" : "O:DAG:DA",
            "175" : "O:DAG:DA",
        },
        # msDS-Behavior-Version >= DS_DOMAIN_FUNCTION_2008
        "ds_behavior_win2008" : {
            "100" : "O:EAG:EA",
            "101" : "O:DAG:DA",
            "102" : "O:%sG:DU",
            "103" : "O:%sG:DU",
            "104" : "O:DAG:DA",
            "105" : "O:DAG:DA",
            "106" : "O:DAG:DA",
            "107" : "O:EAG:EA",
            "108" : "O:DAG:DA",
            "109" : "O:DAG:DA",
            "110" : "O:%sG:DA",
            "111" : "O:%sG:DA",
            "112" : "O:DAG:DA",
            "113" : "O:DAG:DA",
            "114" : "O:DAG:DA",
            "115" : "O:DAG:DA",
            "130" : "O:EAG:EA",
            "131" : "O:DAG:DA",
            "132" : "O:SAG:SA",
            "133" : "O:%sG:DU",
            "134" : "O:EAG:EA",
            "135" : "O:SAG:SA",
            "136" : "O:SAG:SA",
            "137" : "O:SAG:SA",
            "138" : "",
            "139" : "",
            "140" : "O:%sG:DA",
            "141" : "O:%sG:DA",
            "142" : "",
            "143" : "",
            "144" : "",
            "145" : "",
            "160" : "O:EAG:EA",
            "161" : "O:DAG:DA",
            "162" : "O:%sG:DU",
            "163" : "O:%sG:DU",
            "164" : "O:EAG:EA",
            "165" : "O:EAG:EA",
            "166" : "O:DAG:DA",
            "167" : "O:EAG:EA",
            "168" : "O:DAG:DA",
            "169" : "O:DAG:DA",
            "170" : "O:%sG:DA",
            "171" : "O:%sG:DA",
            "172" : "O:DAG:DA",
            "173" : "O:DAG:DA",
            "174" : "O:DAG:DA",
            "175" : "O:DAG:DA",
        },
    }
    # Discover 'domainControllerFunctionality' to pick the expectation set.
    res = self.ldb_admin.search(base="", scope=SCOPE_BASE,
                                attrs=['domainControllerFunctionality'])
    res = int(res[0]['domainControllerFunctionality'][0])
    if res < DS_DOMAIN_FUNCTION_2008:
        self.DS_BEHAVIOR = "ds_behavior_win2003"
    else:
        self.DS_BEHAVIOR = "ds_behavior_win2008"
def tearDown(self):
    """Run base-class teardown, then remove every object the tests created."""
    # BUG FIX: the original called super(DescriptorTests, self), which skips
    # DescriptorTests itself in the MRO (so any tearDown added there would
    # silently never run). Name the defining class, matching setUp().
    super(OwnerGroupDescriptorTests, self).tearDown()
    self.deleteAll()
def check_user_belongs(self, user_dn, groups=None):
    """Assert that the user at user_dn belongs to exactly the given groups.

    :param user_dn: DN of the user object to inspect.
    :param groups: list of group CNs (resolved under CN=Users) the user must
        be a member of; empty/None means no memberOf values at all.
    """
    # FIX: the original used a mutable default argument (groups=[]).
    if groups:
        # memberOf must match the expected DNs exactly (case-insensitive).
        res = self.ldb_admin.search(user_dn, attrs=["memberOf"])
        res = [x.upper() for x in sorted(list(res[0]["memberOf"]))]
        expected = [self.get_users_domain_dn(g) for g in groups]
        expected = [x.upper() for x in sorted(expected)]
        self.assertEqual(expected, res)
    else:
        # User must carry no memberOf attribute beyond the defaults.
        res = self.ldb_admin.search(user_dn, attrs=["*"])
        res = [x.upper() for x in res[0].keys()]
        self.assertFalse("MEMBEROF" in res)
def check_modify_inheritance(self, _ldb, object_dn, owner_group=""):
# Modify
sd_user_utils = sd_utils.SDUtils(_ldb)
ace = "(D;;CC;;;LG)" # Deny Create Children to Guest account
if owner_group != "":
sd_user_utils.modify_sd_on_dn(object_dn, owner_group + "D:" + ace)
else:
sd_user_utils.modify_sd_on_dn(object_dn, "D:" + ace)
# Make sure the modify operation has been applied
desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
self.assertTrue(ace in desc_sddl)
# Make sure we have identical result for both "add" and "modify"
res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
print self._testMethodName
test_number = self._testMethodName[5:]
self.assertEqual(self.results[self.DS_BEHAVIOR][test_number], res)
def test_100(self):
    """Enterprise Admins member creates an object (default nTSecurityDescriptor) in DOMAIN."""
    username = "testuser1"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    user_ldb.newgroup("test_domain_group1", grouptype=4)
    # Owner/group prefix of the new SD must match the expectation table.
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, group_dn)
def test_101(self):
    """Domain Admins member creates an object (default nTSecurityDescriptor) in DOMAIN."""
    username = "testuser2"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    user_ldb.newgroup("test_domain_group1", grouptype=4)
    # Owner/group prefix of the new SD must match the expectation table.
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, group_dn)
def test_102(self):
    """Schema Admins member with CC right creates an object (default SD) in DOMAIN."""
    username = "testuser3"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    ou_dn = "OU=test_domain_ou1," + self.base_dn
    delete_force(self.ldb_admin, ou_dn)
    self.ldb_admin.create_ou(ou_dn)
    # Grant the account WP/WD/CC on the OU so it can create children.
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(username))
    self.sd_utils.dacl_add_ace(ou_dn, "(A;CI;WPWDCC;;;%s)" % str(user_sid))
    # Create an additional object inside the first one.
    user_dn = "CN=test_domain_user1," + ou_dn
    delete_force(self.ldb_admin, user_dn)
    user_ldb.newuser("test_domain_user1", "samba123@",
                     userou="OU=test_domain_ou1", setpassword=False)
    sddl = self.sd_utils.get_sd_as_sddl(user_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid),
                     owner_part)
    # check_modify_inheritance() is known to fail here; deliberately skipped
    # (research why).
def test_103(self):
    """Regular user with CC right creates an object (default SD) in DOMAIN."""
    username = "testuser4"
    self.check_user_belongs(self.get_users_domain_dn(username), [])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    ou_dn = "OU=test_domain_ou1," + self.base_dn
    delete_force(self.ldb_admin, ou_dn)
    self.ldb_admin.create_ou(ou_dn)
    # Grant the account WP/WD/CC on the OU so it can create children.
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(username))
    self.sd_utils.dacl_add_ace(ou_dn, "(A;CI;WPWDCC;;;%s)" % str(user_sid))
    # Create an additional object inside the first one.
    user_dn = "CN=test_domain_user1," + ou_dn
    delete_force(self.ldb_admin, user_dn)
    user_ldb.newuser("test_domain_user1", "samba123@",
                     userou="OU=test_domain_ou1", setpassword=False)
    sddl = self.sd_utils.get_sd_as_sddl(user_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid),
                     owner_part)
    # check_modify_inheritance() is known to fail here; deliberately skipped
    # (research why).
def test_104(self):
    """Enterprise & Domain Admins member creates an object (default SD) in DOMAIN."""
    username = "testuser5"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins", "Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    user_ldb.newgroup("test_domain_group1", grouptype=4)
    # Owner/group prefix of the new SD must match the expectation table.
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, group_dn)
def test_105(self):
    """Enterprise & Domain & Schema Admins member creates an object (default SD) in DOMAIN."""
    username = "testuser6"
    self.check_user_belongs(self.get_users_domain_dn(username),
                            ["Enterprise Admins", "Domain Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    user_ldb.newgroup("test_domain_group1", grouptype=4)
    # Owner/group prefix of the new SD must match the expectation table.
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, group_dn)
def test_106(self):
    """Domain & Schema Admins member creates an object (default SD) in DOMAIN."""
    username = "testuser7"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    user_ldb.newgroup("test_domain_group1", grouptype=4)
    # Owner/group prefix of the new SD must match the expectation table.
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, group_dn)
def test_107(self):
    """Enterprise & Schema Admins member creates an object (default SD) in DOMAIN."""
    username = "testuser8"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    user_ldb.newgroup("test_domain_group1", grouptype=4)
    # Owner/group prefix of the new SD must match the expectation table.
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, group_dn)
# Control descriptor tests #####################################################################
def test_108(self):
    """Enterprise Admins member creates an object (custom descriptor) in DOMAIN."""
    username = "testuser1"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    # Supply a hand-crafted security descriptor at create time.
    custom_sd = security.descriptor.from_sddl("O:DAG:DAD:(A;;RP;;;DU)", self.domain_sid)
    user_ldb.newgroup("test_domain_group1", grouptype=4, sd=custom_sd)
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
def test_109(self):
    """Domain Admins member creates an object (custom descriptor) in DOMAIN."""
    username = "testuser2"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    # Supply a hand-crafted security descriptor at create time.
    custom_sd = security.descriptor.from_sddl("O:DAG:DAD:(A;;RP;;;DU)", self.domain_sid)
    user_ldb.newgroup("test_domain_group1", grouptype=4, sd=custom_sd)
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
def test_110(self):
    """Schema Admins member with CC right creates an object (custom descriptor) in DOMAIN."""
    user_name = "testuser3"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    object_dn = "OU=test_domain_ou1," + self.base_dn
    delete_force(self.ldb_admin, object_dn)
    self.ldb_admin.create_ou(object_dn)
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(user_name))
    mod = "(A;CI;WOWDCC;;;%s)" % str(user_sid)
    self.sd_utils.dacl_add_ace(object_dn, mod)
    # Create a custom security descriptor.
    # NB! Problematic owner part won't accept DA, only <User Sid>
    sddl = "O:%sG:DAD:(A;;RP;;;DU)" % str(user_sid)
    tmp_desc = security.descriptor.from_sddl(sddl, self.domain_sid)
    # Create additional object into the first one
    object_dn = "CN=test_domain_user1," + object_dn
    delete_force(self.ldb_admin, object_dn)
    _ldb.newuser("test_domain_user1", "samba123@",
                 userou="OU=test_domain_ou1", sd=tmp_desc, setpassword=False)
    # FIX: dropped the original's unused binding of read_sd_on_dn()'s result;
    # the SDDL fetched below is what is actually checked.
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid), res)
def test_111(self):
    """Regular user with CC right creates an object (custom descriptor) in DOMAIN."""
    user_name = "testuser4"
    self.check_user_belongs(self.get_users_domain_dn(user_name), [])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    object_dn = "OU=test_domain_ou1," + self.base_dn
    delete_force(self.ldb_admin, object_dn)
    self.ldb_admin.create_ou(object_dn)
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(user_name))
    mod = "(A;CI;WOWDCC;;;%s)" % str(user_sid)
    self.sd_utils.dacl_add_ace(object_dn, mod)
    # Create a custom security descriptor.
    # NB! Problematic owner part won't accept DA, only <User Sid>
    sddl = "O:%sG:DAD:(A;;RP;;;DU)" % str(user_sid)
    tmp_desc = security.descriptor.from_sddl(sddl, self.domain_sid)
    # Create additional object into the first one
    object_dn = "CN=test_domain_user1," + object_dn
    delete_force(self.ldb_admin, object_dn)
    _ldb.newuser("test_domain_user1", "samba123@",
                 userou="OU=test_domain_ou1", sd=tmp_desc, setpassword=False)
    # FIX: dropped the original's unused binding of read_sd_on_dn()'s result;
    # the SDDL fetched below is what is actually checked.
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid), res)
def test_112(self):
    """Domain & Enterprise Admins member creates an object (custom descriptor) in DOMAIN."""
    username = "testuser5"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins", "Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    # Supply a hand-crafted security descriptor at create time.
    custom_sd = security.descriptor.from_sddl("O:DAG:DAD:(A;;RP;;;DU)", self.domain_sid)
    user_ldb.newgroup("test_domain_group1", grouptype=4, sd=custom_sd)
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
def test_113(self):
    """Domain & Enterprise & Schema Admins member creates an object (custom descriptor) in DOMAIN."""
    username = "testuser6"
    self.check_user_belongs(self.get_users_domain_dn(username),
                            ["Enterprise Admins", "Domain Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    # Supply a hand-crafted security descriptor at create time.
    custom_sd = security.descriptor.from_sddl("O:DAG:DAD:(A;;RP;;;DU)", self.domain_sid)
    user_ldb.newgroup("test_domain_group1", grouptype=4, sd=custom_sd)
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
def test_114(self):
    """Domain & Schema Admins member creates an object (custom descriptor) in DOMAIN."""
    username = "testuser7"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    # Supply a hand-crafted security descriptor at create time.
    custom_sd = security.descriptor.from_sddl("O:DAG:DAD:(A;;RP;;;DU)", self.domain_sid)
    user_ldb.newgroup("test_domain_group1", grouptype=4, sd=custom_sd)
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
def test_115(self):
    """Enterprise & Schema Admins member creates an object (custom descriptor) in DOMAIN."""
    username = "testuser8"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    group_dn = "CN=test_domain_group1,CN=Users," + self.base_dn
    delete_force(self.ldb_admin, group_dn)
    # Supply a hand-crafted security descriptor at create time.
    custom_sd = security.descriptor.from_sddl("O:DAG:DAD:(A;;RP;;;DU)", self.domain_sid)
    user_ldb.newgroup("test_domain_group1", grouptype=4, sd=custom_sd)
    sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
def test_999(self):
    """Inheritance smoke test: add an inheritable deny-WriteProperty ACE for
    CREATOR GROUP (S-1-3-0) on an OU, then create a child OU beneath it.

    NOTE(review): the original made no assertions; it only exercises the SD
    read/write paths. Behavior kept; unused local bindings removed.
    """
    object_dn = "OU=test_domain_ou1," + self.base_dn
    delete_force(self.ldb_admin, object_dn)
    self.ldb_admin.create_ou(object_dn)
    # FIX: dropped the original's unused user_sid lookup and the two unused
    # desc_sddl bindings; the SD reads are kept as bare calls.
    mod = "(D;CI;WP;;;S-1-3-0)"
    self.sd_utils.dacl_add_ace(object_dn, mod)
    self.sd_utils.get_sd_as_sddl(object_dn)
    # Create additional object into the first one
    object_dn = "OU=test_domain_ou2," + object_dn
    delete_force(self.ldb_admin, object_dn)
    self.ldb_admin.create_ou(object_dn)
    self.sd_utils.get_sd_as_sddl(object_dn)
## Tests for SCHEMA
# Default descriptor tests ##################################################################
def test_130(self):
    """Enterprise Admins member creates a schema class (default SD) in SCHEMA."""
    user_name = "testuser1"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Change Schema partition descriptor: allow Authenticated Users to create.
    # FIX: dropped the original's unused user_sid lookup (the expected value
    # for "130" carries no %s placeholder).
    mod = "(A;;WDCC;;;AU)"
    self.sd_utils.dacl_add_ace(self.schema_dn, mod)
    # Create example Schema class
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(_ldb, class_dn)
    desc_sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], res)
    self.check_modify_inheritance(_ldb, class_dn)
def test_131(self):
    """Domain Admins member creates a schema class (default SD) in SCHEMA."""
    username = "testuser2"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable).
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, class_dn)
def test_132(self):
    """Schema Admins member creates a schema class (default SD) in SCHEMA."""
    username = "testuser3"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable).
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    # check_modify_inheritance() deliberately skipped here.
def test_133(self):
    """Regular user creates a schema class (default SD) in SCHEMA."""
    username = "testuser4"
    self.check_user_belongs(self.get_users_domain_dn(username), [])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable);
    # the creator's SID is needed for the expected owner part below.
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(username))
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid),
                     owner_part)
    # check_modify_inheritance() deliberately skipped here.
def test_134(self):
    """Enterprise & Domain Admins member creates a schema class (default SD) in SCHEMA."""
    username = "testuser5"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins", "Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable).
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, class_dn)
def test_135(self):
    """Enterprise & Domain & Schema Admins member creates a schema class (default SD) in SCHEMA."""
    username = "testuser6"
    self.check_user_belongs(self.get_users_domain_dn(username),
                            ["Enterprise Admins", "Domain Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable).
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, class_dn)
def test_136(self):
    """Domain & Schema Admins member creates a schema class (default SD) in SCHEMA."""
    username = "testuser7"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable).
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, class_dn)
def test_137(self):
    """Enterprise & Schema Admins member creates a schema class (default SD) in SCHEMA."""
    username = "testuser8"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins", "Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Open the schema partition up to Authenticated Users (inheritable).
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;CI;WDCC;;;AU)")
    # Create an example schema class.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_part)
    self.check_modify_inheritance(user_ldb, class_dn)
# Custom descriptor tests ##################################################################
def test_138(self):
    """Enterprise Admins member creates a schema class with a custom SD."""
    username = "testuser1"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Enterprise Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Allow Authenticated Users to create children in the schema NC.
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;;CC;;;AU)")
    # Example schema class carrying a hand-crafted descriptor.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn, "O:DAG:DAD:(A;;RP;;;DU)")
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual("O:DAG:DA", owner_part)
def test_139(self):
    """Domain Admins member creates a schema class with a custom SD."""
    username = "testuser2"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Domain Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # Allow Authenticated Users to create children in the schema NC.
    self.sd_utils.dacl_add_ace(self.schema_dn, "(A;;CC;;;AU)")
    # Example schema class carrying a hand-crafted descriptor.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn, "O:DAG:DAD:(A;;RP;;;DU)")
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual("O:DAG:DA", owner_part)
def test_140(self):
    """Schema Admins member creates a schema class with a custom SD."""
    username = "testuser3"
    self.check_user_belongs(self.get_users_domain_dn(username), ["Schema Admins"])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # NB! The owner part only accepts the creator's own SID here, not DA.
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(username))
    custom_sddl = "O:%sG:DAD:(A;;RP;;;DU)" % str(user_sid)
    # Example schema class carrying the hand-crafted descriptor.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn, custom_sddl)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid),
                     owner_part)
def test_141(self):
    """Regular user creates a schema class with a custom SD."""
    username = "testuser4"
    self.check_user_belongs(self.get_users_domain_dn(username), [])
    # Connect as the user under test.
    user_ldb = self.get_ldb_connection(username, "samba123@")
    # NB! The owner part only accepts the creator's own SID here, not DA.
    user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn(username))
    custom_sddl = "O:%sG:DAD:(A;;RP;;;DU)" % str(user_sid)
    # Example schema class carrying the hand-crafted descriptor.
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(user_ldb, class_dn, custom_sddl)
    sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    owner_part = re.search("(O:.*G:.*?)D:", sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid),
                     owner_part)
def test_142(self):
    """Schema class with custom "O:DAG:DA" SD by an Enterprise+Domain Admins member.

    First grants Authenticated Users create-child on the Schema partition,
    then creates the class requesting owner/group DA; asserts the stored
    descriptor keeps "O:DAG:DA".
    """
    user_name = "testuser5"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Domain Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Change Schema partition descriptor
    mod = "(A;;CC;;;AU)"
    self.sd_utils.dacl_add_ace(self.schema_dn, mod)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    # Create example Schema class
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(_ldb, class_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_143(self):
    """Schema class with custom "O:DAG:DA" SD by an EA+DA+SA member.

    Same flow as test_142 with a user in Enterprise, Domain and Schema
    Admins; expects the requested "O:DAG:DA" owner/group to be kept.
    """
    user_name = "testuser6"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Domain Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Change Schema partition descriptor
    mod = "(A;;CC;;;AU)"
    self.sd_utils.dacl_add_ace(self.schema_dn, mod)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    # Create example Schema class
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(_ldb, class_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_144(self):
    """Schema class with custom "O:DAG:DA" SD by a Domain+Schema Admins member.

    Same flow as test_142; expects the requested "O:DAG:DA" owner/group
    to be preserved on the created class.
    """
    user_name = "testuser7"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Domain Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Change Schema partition descriptor
    mod = "(A;;CC;;;AU)"
    self.sd_utils.dacl_add_ace(self.schema_dn, mod)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    # Create example Schema class
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(_ldb, class_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_145(self):
    """Schema class with custom "O:DAG:DA" SD by an Enterprise+Schema Admins member.

    Same flow as test_142; expects the requested "O:DAG:DA" owner/group
    to be preserved on the created class.
    """
    user_name = "testuser8"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Change Schema partition descriptor
    mod = "(A;;CC;;;AU)"
    self.sd_utils.dacl_add_ace(self.schema_dn, mod)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    # Create example Schema class
    class_name = self.get_unique_schema_class_name()
    class_dn = "CN=%s,%s" % (class_name, self.schema_dn)
    self.create_schema_class(_ldb, class_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(class_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
## Tests for CONFIGURATION
# Defalt descriptor tests ##################################################################
def test_160(self):
    """Default SD: Enterprise Admins member creates a DisplaySpecifiers
    container; the resulting owner/group prefix must match the expected
    value in self.results, and modify-inheritance is then verified.
    """
    user_name = "testuser1"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins"])
    # Bind as the user under test
    user_ldb = self.get_ldb_connection(user_name, "samba123@")
    # Build the DN of the example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(user_ldb, object_dn, )
    # Compare only the owner/group portion of the stored descriptor
    stored_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    owner_group = re.search("(O:.*G:.*?)D:", stored_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], owner_group)
    self.check_modify_inheritance(user_ldb, object_dn)
def test_161(self):
    """Default SD: Domain Admins member creates a DisplaySpecifiers
    container; owner/group prefix checked against self.results, then
    modify-inheritance is verified.
    """
    user_name = "testuser2"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Domain Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(_ldb, object_dn, )
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], res)
    self.check_modify_inheritance(_ldb, object_dn)
def test_162(self):
    """Default SD on a child specifier created by a Schema Admins member.

    Admin creates the parent container, grants Authenticated Users
    WDCC on it, then the test user creates the child specifier; the
    child's owner/group prefix (containing the user's SID) is checked
    against self.results.
    """
    user_name = "testuser3"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    object_dn = "CN=test-container1,CN=DisplaySpecifiers," + self.configuration_dn
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(self.ldb_admin, object_dn, )
    user_sid = self.sd_utils.get_object_sid( self.get_users_domain_dn(user_name) )
    # Grant write-DACL + create-child to Authenticated Users on the container
    mod = "(A;;WDCC;;;AU)"
    self.sd_utils.dacl_add_ace(object_dn, mod)
    # Create child object with user's credentials
    object_dn = "CN=test-specifier1," + object_dn
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_specifier(_ldb, object_dn)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid), res)
    #self.check_modify_inheritance(_ldb, object_dn)
def test_163(self):
    """Default SD on a child specifier created by a user in no groups.

    Like test_162, but the container ACE carries the CI (container
    inherit) flag and the creating user has no admin memberships.
    """
    user_name = "testuser4"
    self.check_user_belongs(self.get_users_domain_dn(user_name), [])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    object_dn = "CN=test-container1,CN=DisplaySpecifiers," + self.configuration_dn
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(self.ldb_admin, object_dn, )
    user_sid = self.sd_utils.get_object_sid( self.get_users_domain_dn(user_name) )
    # Inheritable grant of write-DACL + create-child to Authenticated Users
    mod = "(A;CI;WDCC;;;AU)"
    self.sd_utils.dacl_add_ace(object_dn, mod)
    # Create child object with user's credentials
    object_dn = "CN=test-specifier1," + object_dn
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_specifier(_ldb, object_dn)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid), res)
    #self.check_modify_inheritance(_ldb, object_dn)
def test_164(self):
    """Default SD: Enterprise+Domain Admins member creates a
    DisplaySpecifiers container; owner/group checked against self.results.
    """
    user_name = "testuser5"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Domain Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(_ldb, object_dn, )
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], res)
    self.check_modify_inheritance(_ldb, object_dn)
def test_165(self):
    """Default SD: EA+DA+SA member creates a DisplaySpecifiers
    container; owner/group checked against self.results.
    """
    user_name = "testuser6"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Domain Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(_ldb, object_dn, )
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], res)
    self.check_modify_inheritance(_ldb, object_dn)
def test_166(self):
    """Default SD: Domain+Schema Admins member creates a
    DisplaySpecifiers container; owner/group checked against self.results.
    """
    user_name = "testuser7"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Domain Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(_ldb, object_dn, )
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], res)
    self.check_modify_inheritance(_ldb, object_dn)
def test_167(self):
    """Default SD: Enterprise+Schema Admins member creates a
    DisplaySpecifiers container; owner/group checked against self.results.
    """
    user_name = "testuser8"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(_ldb, object_dn, )
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]], res)
    self.check_modify_inheritance(_ldb, object_dn)
# Custom descriptor tests ##################################################################
def test_168(self):
    """Custom SD: Enterprise Admins member creates a DisplaySpecifiers
    container requesting owner/group DA; asserts "O:DAG:DA" is kept.
    """
    user_name = "testuser1"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins"])
    # Bind as the user under test
    user_ldb = self.get_ldb_connection(user_name, "samba123@")
    # DN of the example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    # Request a custom security descriptor at creation time
    requested_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    self.create_configuration_container(user_ldb, object_dn, requested_sddl)
    stored_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    owner_group = re.search("(O:.*G:.*?)D:", stored_sddl).group(1)
    self.assertEqual("O:DAG:DA", owner_group)
def test_169(self):
    """Custom SD: Domain Admins member creates a DisplaySpecifiers
    container with owner/group DA; asserts "O:DAG:DA" is kept.
    """
    user_name = "testuser2"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Domain Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    self.create_configuration_container(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_170(self):
    """Custom SD on a child specifier created by a Schema Admins member.

    Admin creates the container, grants Authenticated Users create-child,
    then the test user creates the specifier with an SD owned by the
    user's own SID; result compared against self.results.
    """
    user_name = "testuser3"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    object_dn = "CN=test-container1,CN=DisplaySpecifiers," + self.configuration_dn
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(self.ldb_admin, object_dn, )
    user_sid = self.sd_utils.get_object_sid( self.get_users_domain_dn(user_name) )
    mod = "(A;;CC;;;AU)"
    self.sd_utils.dacl_add_ace(object_dn, mod)
    # Create child object with user's credentials
    object_dn = "CN=test-specifier1," + object_dn
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    # NB! Problematic owner part won't accept DA only <User Sid> !!!
    desc_sddl = "O:%sG:DAD:(A;;RP;;;DU)" % str(user_sid)
    self.create_configuration_specifier(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid), res)
def test_171(self):
    """Custom SD on a child specifier created by a user in no groups.

    Same flow as test_170 but the creating user has no admin
    memberships; result compared against self.results.
    """
    user_name = "testuser4"
    self.check_user_belongs(self.get_users_domain_dn(user_name), [])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    object_dn = "CN=test-container1,CN=DisplaySpecifiers," + self.configuration_dn
    delete_force(self.ldb_admin, object_dn)
    self.create_configuration_container(self.ldb_admin, object_dn, )
    user_sid = self.sd_utils.get_object_sid( self.get_users_domain_dn(user_name) )
    mod = "(A;;CC;;;AU)"
    self.sd_utils.dacl_add_ace(object_dn, mod)
    # Create child object with user's credentials
    object_dn = "CN=test-specifier1," + object_dn
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    # NB! Problematic owner part won't accept DA only <User Sid> !!!
    desc_sddl = "O:%sG:DAD:(A;;RP;;;DU)" % str(user_sid)
    self.create_configuration_specifier(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual(self.results[self.DS_BEHAVIOR][self._testMethodName[5:]] % str(user_sid), res)
def test_172(self):
    """Custom SD: Enterprise+Domain Admins member creates a
    DisplaySpecifiers container with owner/group DA; asserts "O:DAG:DA".
    """
    user_name = "testuser5"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Domain Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    self.create_configuration_container(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_173(self):
    """Custom SD: EA+DA+SA member creates a DisplaySpecifiers
    container with owner/group DA; asserts "O:DAG:DA".
    """
    user_name = "testuser6"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Domain Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    self.create_configuration_container(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_174(self):
    """Custom SD: Domain+Schema Admins member creates a
    DisplaySpecifiers container with owner/group DA; asserts "O:DAG:DA".
    """
    user_name = "testuser7"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Domain Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    self.create_configuration_container(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
def test_175(self):
    """Custom SD: Enterprise+Schema Admins member creates a
    DisplaySpecifiers container with owner/group DA; asserts "O:DAG:DA".
    """
    user_name = "testuser8"
    self.check_user_belongs(self.get_users_domain_dn(user_name), ["Enterprise Admins", "Schema Admins"])
    # Open Ldb connection with the tested user
    _ldb = self.get_ldb_connection(user_name, "samba123@")
    # Create example Configuration container
    container_name = "test-container1"
    object_dn = "CN=%s,CN=DisplaySpecifiers,%s" % (container_name, self.configuration_dn)
    delete_force(self.ldb_admin, object_dn)
    # Create a custom security descriptor
    desc_sddl = "O:DAG:DAD:(A;;RP;;;DU)"
    self.create_configuration_container(_ldb, object_dn, desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    res = re.search("(O:.*G:.*?)D:", desc_sddl).group(1)
    self.assertEqual("O:DAG:DA", res)
########################################################################################
# Inheritance tests for DACL
class DaclDescriptorTests(DescriptorTests):
def deleteAll(self):
    """Force-delete every OU/group this test class may leave behind,
    children before parents so the deletes cannot fail on non-leaf nodes."""
    leftover_rdns = (
        "CN=test_inherit_group,OU=test_inherit_ou,",
        "OU=test_inherit_ou5,OU=test_inherit_ou1,OU=test_inherit_ou_p,",
        "OU=test_inherit_ou6,OU=test_inherit_ou2,OU=test_inherit_ou_p,",
        "OU=test_inherit_ou1,OU=test_inherit_ou_p,",
        "OU=test_inherit_ou2,OU=test_inherit_ou_p,",
        "OU=test_inherit_ou3,OU=test_inherit_ou_p,",
        "OU=test_inherit_ou4,OU=test_inherit_ou_p,",
        "OU=test_inherit_ou_p,",
        "OU=test_inherit_ou,",
    )
    for rdn in leftover_rdns:
        delete_force(self.ldb_admin, rdn + self.base_dn)
def setUp(self):
    """Standard fixture setup plus removal of any objects left over
    from a previous (possibly aborted) run."""
    super(DaclDescriptorTests, self).setUp()
    self.deleteAll()
def create_clean_ou(self, object_dn):
    """Create *object_dn* as an OU whose descriptor has no inheritable ACEs.

    Base repeating setup for unittests to follow: asserts the OU does not
    already exist, creates it, strips every ACE carrying the CI or OI
    inheritance flag from its SDDL, and marks the DACL/SACL protected
    (":AI" -> ":AIP") so nothing propagates from above either.

    :param object_dn: DN of the OU to create under self.base_dn
    """
    res = self.ldb_admin.search(base=self.base_dn, scope=SCOPE_SUBTREE,
                                expression="distinguishedName=%s" % object_dn)
    # Make sure top testing OU has been deleted before starting the test
    self.assertEqual(len(res), 0)
    self.ldb_admin.create_ou(object_dn)
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    # Make sure there are inheritable ACEs initially
    self.assertTrue("CI" in desc_sddl or "OI" in desc_sddl)
    # Find and remove all inherit ACEs.  Raw string: "\(" is an invalid
    # escape sequence and warns on modern Python 3.
    for ace in re.findall(r"\(.*?\)", desc_sddl):
        if "CI" in ace or "OI" in ace:
            desc_sddl = desc_sddl.replace(ace, "")
    # Add flag 'protected' in both DACL and SACL so no inherit ACEs
    # can propagate from above
    # remove SACL, we are not interested
    desc_sddl = desc_sddl.replace(":AI", ":AIP")
    self.sd_utils.modify_sd_on_dn(object_dn, desc_sddl)
    # Verify all inheritable ACEs are gone
    desc_sddl = self.sd_utils.get_sd_as_sddl(object_dn)
    self.assertFalse("CI" in desc_sddl)
    self.assertFalse("OI" in desc_sddl)
def test_200(self):
    """ OU with protected flag and child group. See if the group has inherit ACEs.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Create group child object
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4)
    # Make sure created group object contains NO inherit ACEs
    # ("ID" is the SDDL inherited-ACE flag)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("ID" in desc_sddl)
def test_201(self):
    """ OU with protected flag and no inherit ACEs, child group with custom descriptor.
    Verify group has custom and default ACEs only.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Create group child object using custom security descriptor
    sddl = "O:AUG:AUD:AI(D;;WP;;;DU)"
    tmp_desc = security.descriptor.from_sddl(sddl, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group descriptor has NO additional ACEs
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertEqual(desc_sddl, sddl)
    # Replace the whole SD and verify it round-trips unchanged
    sddl = "O:AUG:AUD:AI(D;;CC;;;LG)"
    self.sd_utils.modify_sd_on_dn(group_dn, sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertEqual(desc_sddl, sddl)
def test_202(self):
    """ OU with protected flag and add couple non-inheritable ACEs, child group.
    See if the group has any of the added ACEs.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom non-inheritable ACEs
    mod = "(D;;WP;;;DU)(A;;RP;;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    # Re-read the OU descriptor after the ACE change (value itself unused)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4)
    # Make sure created group object contains NO inherit ACEs
    # also make sure the added above non-inheritable ACEs are absent too
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("ID" in desc_sddl)
    # Raw string: "\(" is an invalid escape sequence on modern Python 3
    for x in re.findall(r"\(.*?\)", mod):
        self.assertFalse(x in desc_sddl)
    # Rewrite the group's DACL and check the properties still hold
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("ID" in desc_sddl)
    for x in re.findall(r"\(.*?\)", mod):
        self.assertFalse(x in desc_sddl)
def test_203(self):
    """ OU with protected flag and add 'CI' ACE, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'CI' ACE
    mod = "(D;CI;WP;;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    # On the child the ACE carries the ID (inherited) flag as well
    mod = mod.replace(";CI;", ";CIID;")
    self.assertTrue(mod in desc_sddl)
    # Rewriting the DACL must not discard the inherited ACE
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    self.assertTrue(mod in desc_sddl)
def test_204(self):
    """ OU with protected flag and add 'OI' ACE, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'CI' ACE
    mod = "(D;OI;WP;;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    mod = mod.replace(";OI;", ";OIIOID;") # change it how it's gonna look like
    self.assertTrue(mod in desc_sddl)
    # Rewriting the DACL must not discard the inherited ACE
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" +moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    self.assertTrue(mod in desc_sddl)
def test_205(self):
    """ OU with protected flag and add 'OA' for GUID & 'CI' ACE, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'OA' for 'name' attribute & 'CI' ACE
    mod = "(OA;CI;WP;bf967a0e-0de6-11d0-a285-00aa003049e2;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    mod = mod.replace(";CI;", ";CIID;") # change it how it's gonna look like
    self.assertTrue(mod in desc_sddl)
    # Rewriting the DACL must not discard the inherited ACE
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    self.assertTrue(mod in desc_sddl)
def test_206(self):
    """ OU with protected flag and add 'OA' for GUID & 'OI' ACE, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'OA' for 'name' attribute & 'OI' ACE
    mod = "(OA;OI;WP;bf967a0e-0de6-11d0-a285-00aa003049e2;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    mod = mod.replace(";OI;", ";OIIOID;") # change it how it's gonna look like
    self.assertTrue(mod in desc_sddl)
    # Rewriting the DACL must not discard the inherited ACE
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    self.assertTrue(mod in desc_sddl)
def test_207(self):
    """ OU with protected flag and add 'OA' for OU specific GUID & 'CI' ACE, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'OA' for 'st' attribute (OU specific) & 'CI' ACE
    mod = "(OA;CI;WP;bf967a39-0de6-11d0-a285-00aa003049e2;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    mod = mod.replace(";CI;", ";CIID;") # change it how it's gonna look like
    self.assertTrue(mod in desc_sddl)
    # Rewriting the DACL must not discard the inherited ACE
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    self.assertTrue(mod in desc_sddl)
def test_208(self):
    """ OU with protected flag and add 'OA' for OU specific GUID & 'OI' ACE, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'OA' for 'st' attribute (OU specific) & 'OI' ACE
    mod = "(OA;OI;WP;bf967a39-0de6-11d0-a285-00aa003049e2;;DU)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    mod = mod.replace(";OI;", ";OIIOID;") # change it how it's gonna look like
    self.assertTrue(mod in desc_sddl)
    # NOTE(review): unlike test_206/207 this rewrite re-supplies the OA ACE
    # alongside `moded` rather than using plain "D:" — presumably deliberate,
    # but confirm it is not a copy/paste remnant.
    self.sd_utils.modify_sd_on_dn(group_dn, "D:(OA;OI;WP;bf967a39-0de6-11d0-a285-00aa003049e2;;DU)" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    self.assertTrue(mod in desc_sddl)
def test_209(self):
    """ OU with protected flag and add 'CI' ACE with 'CO' SID, child group.
    See if the group has the added inherited ACE.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'CI' ACE
    mod = "(D;CI;WP;;;CO)"
    moded = "(D;;CC;;;LG)"
    self.sd_utils.dacl_add_ace(ou_dn, mod)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
    # Create group child object
    tmp_desc = security.descriptor.from_sddl("O:AUG:AUD:AI(A;;CC;;;AU)", self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE(s)
    # that we've added manually: the CO (creator-owner) ACE is expected to be
    # expanded to the creator (AU here) plus an inherit-only CO copy
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue("(D;ID;WP;;;AU)" in desc_sddl)
    self.assertTrue("(D;CIIOID;WP;;;CO)" in desc_sddl)
    self.sd_utils.modify_sd_on_dn(group_dn, "D:" + moded)
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue(moded in desc_sddl)
    # After the admin rewrite the expanded ACE refers to DA
    self.assertTrue("(D;ID;WP;;;DA)" in desc_sddl)
    self.assertTrue("(D;CIIOID;WP;;;CO)" in desc_sddl)
def test_210(self):
    """ OU with protected flag, provide ACEs with ID flag raised. Should be ignored.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    self.create_clean_ou(ou_dn)
    # Add some custom ACE — the second ACE claims to be inherited (ID)
    mod = "D:(D;CIIO;WP;;;CO)(A;ID;WP;;;AU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object does not contain the ID ace
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("(A;ID;WP;;;AU)" in desc_sddl)
def test_211(self):
    """ Provide ACE with CO SID, should be expanded and replaced
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'CI' ACE
    mod = "D:(D;CI;WP;;;CO)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # CO is replaced by the creator (DA) and kept as an inherit-only copy
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue("(D;;WP;;;DA)" in desc_sddl)
    self.assertTrue("(D;CIIO;WP;;;CO)" in desc_sddl)
def test_212(self):
    """ Provide ACE with IO flag, should be ignored
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Add some custom 'CI' ACE (inherit-only, so not effective on the object)
    mod = "D:(D;CIIO;WP;;;CO)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE(s)
    # that we've added manually: present once, not expanded to DA,
    # and not duplicated
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertTrue("(D;CIIO;WP;;;CO)" in desc_sddl)
    self.assertFalse("(D;;WP;;;DA)" in desc_sddl)
    self.assertFalse("(D;CIIO;WP;;;CO)(D;CIIO;WP;;;CO)" in desc_sddl)
def test_213(self):
    """ Provide ACE with IO flag, should be ignored
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    # Create inheritable-free OU
    self.create_clean_ou(ou_dn)
    # Inherit-only ACE with no CI/OI: can never apply anywhere
    mod = "D:(D;IO;WP;;;DA)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object contains only the above inherited ACE(s)
    # that we've added manually
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("(D;IO;WP;;;DA)" in desc_sddl)
def test_214(self):
    """ Test behavior of ACEs containing generic rights (GA).

    On the object itself GA is mapped to the equivalent specific rights
    (RPWPCRCCDCLCLORCWOWDSDDTSW); an inheritable copy keeps GA with the
    IO flag added so children receive the generic form for re-mapping.
    """
    ou_dn = "OU=test_inherit_ou_p," + self.base_dn
    ou_dn1 = "OU=test_inherit_ou1," + ou_dn
    ou_dn2 = "OU=test_inherit_ou2," + ou_dn
    ou_dn3 = "OU=test_inherit_ou3," + ou_dn
    ou_dn4 = "OU=test_inherit_ou4," + ou_dn
    ou_dn5 = "OU=test_inherit_ou5," + ou_dn1
    ou_dn6 = "OU=test_inherit_ou6," + ou_dn2
    # Create inheritable-free (protected-DACL) parent OU
    mod = "D:P(A;CI;WPRPLCCCDCWDRC;;;DA)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn, sd=tmp_desc)
    # OU1: GA with container-inherit (CI)
    mod = "D:(A;CI;GA;;;DU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn1, sd=tmp_desc)
    # OU2: GA with CI + inherit-only (IO)
    mod = "D:(A;CIIO;GA;;;DU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn2, sd=tmp_desc)
    # OU3: GA with no inheritance flags
    mod = "D:(A;;GA;;;DU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn3, sd=tmp_desc)
    # OU4: GA with IO only (should be dropped entirely)
    mod = "D:(A;IO;GA;;;DU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn4, sd=tmp_desc)
    # Children created under OU1 and OU2 respectively
    self.ldb_admin.create_ou(ou_dn5)
    self.ldb_admin.create_ou(ou_dn6)
    # OU1: effective specific-rights copy plus the inheritable GA copy
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn1)
    self.assertTrue("(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DU)" in desc_sddl)
    self.assertTrue("(A;CIIO;GA;;;DU)" in desc_sddl)
    # OU2: inherit-only, so no effective copy on the object itself
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn2)
    self.assertFalse("(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DU)" in desc_sddl)
    self.assertTrue("(A;CIIO;GA;;;DU)" in desc_sddl)
    # OU3: no inheritance, only the expanded specific rights remain
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn3)
    self.assertTrue("(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DU)" in desc_sddl)
    self.assertFalse("(A;CIIO;GA;;;DU)" in desc_sddl)
    # OU4: IO without CI/OI means the ACE is discarded completely
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn4)
    self.assertFalse("(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DU)" in desc_sddl)
    self.assertFalse("(A;CIIO;GA;;;DU)" in desc_sddl)
    # Children: inherited (ID) expanded copy + inherit-only GA copy
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn5)
    self.assertTrue("(A;ID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DU)" in desc_sddl)
    self.assertTrue("(A;CIIOID;GA;;;DU)" in desc_sddl)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn6)
    self.assertTrue("(A;ID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DU)" in desc_sddl)
    self.assertTrue("(A;CIIOID;GA;;;DU)" in desc_sddl)
def test_215(self):
    """ Make sure the IO (inherit-only) flag is removed from the ACE
    copies that land on child objects.
    """
    ou_dn = "OU=test_inherit_ou_p," + self.base_dn
    ou_dn1 = "OU=test_inherit_ou1," + ou_dn
    ou_dn5 = "OU=test_inherit_ou5," + ou_dn1
    # Create inheritable-free (protected-DACL) parent OU
    mod = "D:P(A;CI;WPRPLCCCDCWDRC;;;DA)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn, sd=tmp_desc)
    # OU1 carries a container-inherit, inherit-only ACE
    mod = "D:(A;CIIO;WP;;;DU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn1, sd=tmp_desc)
    self.ldb_admin.create_ou(ou_dn5)
    desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn5)
    # On the child the copy is effective (ID set, IO stripped)
    self.assertTrue("(A;CIID;WP;;;DU)" in desc_sddl)
    self.assertFalse("(A;CIIOID;WP;;;DU)" in desc_sddl)
def test_216(self):
    """ Make sure ID (inherited-flagged) ACEs provided by the user are
    ignored when the supplied descriptor is not protected.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    mod = "D:P(A;;WPRPLCCCDCWDRC;;;DA)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn, sd=tmp_desc)
    # Add a custom ACE carrying the ID (inherited) flag
    mod = "D:(D;ID;WP;;;AU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # Make sure created group object does not contain the ID ace
    # NOTE(review): the descriptor above adds a deny ("D;...") ACE, yet the
    # assertions below check allow ("A;...") ACEs, so they pass vacuously.
    # They should probably check "(D;ID;WP;;;AU)" / "(D;;WP;;;AU)" instead
    # - confirm against the upstream Samba test suite.
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("(A;ID;WP;;;AU)" in desc_sddl)
    self.assertFalse("(A;;WP;;;AU)" in desc_sddl)
def test_217(self):
    """ Make sure ID ACEs provided by the user are NOT ignored when the
    P (protected) flag is set: the ACE is kept, with ID stripped.
    """
    ou_dn = "OU=test_inherit_ou," + self.base_dn
    group_dn = "CN=test_inherit_group," + ou_dn
    mod = "D:P(A;;WPRPLCCCDCWDRC;;;DA)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.create_ou(ou_dn, sd=tmp_desc)
    # Add a custom ID-flagged ACE inside a protected (P) DACL
    mod = "D:P(A;ID;WP;;;AU)"
    tmp_desc = security.descriptor.from_sddl(mod, self.domain_sid)
    self.ldb_admin.newgroup("test_inherit_group", groupou="OU=test_inherit_ou", grouptype=4, sd=tmp_desc)
    # The ID flag is stripped, but the ACE itself survives
    desc_sddl = self.sd_utils.get_sd_as_sddl(group_dn)
    self.assertFalse("(A;ID;WP;;;AU)" in desc_sddl)
    self.assertTrue("(A;;WP;;;AU)" in desc_sddl)
########################################################################################
class SdFlagsDescriptorTests(DescriptorTests):
    """Tests for the LDB "sd_flags" control, which selects which parts of
    the security descriptor (owner, group, DACL, SACL) a read or a write
    applies to.
    """

    def deleteAll(self):
        # Remove the OU used by these tests (ignores a missing object)
        delete_force(self.ldb_admin, "OU=test_sdflags_ou," + self.base_dn)

    def setUp(self):
        super(SdFlagsDescriptorTests, self).setUp()
        # Descriptor containing all four parts: owner, group, DACL, SACL
        self.test_descr = "O:AUG:AUD:(D;;CC;;;LG)S:(OU;;WP;;;AU)"
        self.deleteAll()

    def test_301(self):
        """ Modify a descriptor with OWNER_SECURITY_INFORMATION set.
        See that only the owner has been changed.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        self.sd_utils.modify_sd_on_dn(ou_dn, self.test_descr, controls=["sd_flags:1:%d" % (SECINFO_OWNER)])
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
        # make sure we have modified the owner
        self.assertTrue("O:AU" in desc_sddl)
        # make sure nothing else has been modified
        self.assertFalse("G:AU" in desc_sddl)
        self.assertFalse("D:(D;;CC;;;LG)" in desc_sddl)
        self.assertFalse("(OU;;WP;;;AU)" in desc_sddl)

    def test_302(self):
        """ Modify a descriptor with GROUP_SECURITY_INFORMATION set.
        See that only the group has been changed.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        self.sd_utils.modify_sd_on_dn(ou_dn, self.test_descr, controls=["sd_flags:1:%d" % (SECINFO_GROUP)])
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
        # make sure we have modified the group
        self.assertTrue("G:AU" in desc_sddl)
        # make sure nothing else has been modified
        self.assertFalse("O:AU" in desc_sddl)
        self.assertFalse("D:(D;;CC;;;LG)" in desc_sddl)
        self.assertFalse("(OU;;WP;;;AU)" in desc_sddl)

    def test_303(self):
        """ Modify a descriptor with DACL_SECURITY_INFORMATION set.
        See that only the DACL has been changed.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        self.sd_utils.modify_sd_on_dn(ou_dn, self.test_descr, controls=["sd_flags:1:%d" % (SECINFO_DACL)])
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
        # make sure we have modified the DACL
        self.assertTrue("(D;;CC;;;LG)" in desc_sddl)
        # make sure nothing else has been modified
        self.assertFalse("O:AU" in desc_sddl)
        self.assertFalse("G:AU" in desc_sddl)
        self.assertFalse("(OU;;WP;;;AU)" in desc_sddl)

    def test_304(self):
        """ Modify a descriptor with SACL_SECURITY_INFORMATION set.
        See that only the SACL has been changed.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        self.sd_utils.modify_sd_on_dn(ou_dn, self.test_descr, controls=["sd_flags:1:%d" % (SECINFO_SACL)])
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
        # make sure we have modified the SACL
        self.assertTrue("(OU;;WP;;;AU)" in desc_sddl)
        # make sure nothing else has been modified
        self.assertFalse("O:AU" in desc_sddl)
        self.assertFalse("G:AU" in desc_sddl)
        self.assertFalse("(D;;CC;;;LG)" in desc_sddl)

    def test_305(self):
        """ Modify a descriptor with 0x0 set.
        Contrary to logic this is interpreted as no control,
        which is the same as 0xF
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        self.sd_utils.modify_sd_on_dn(ou_dn, self.test_descr, controls=["sd_flags:1:0"])
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
        # make sure we have modified the DACL
        self.assertTrue("(OU;;WP;;;AU)" in desc_sddl)
        # make sure everything else has been modified as well
        self.assertTrue("O:AU" in desc_sddl)
        self.assertTrue("G:AU" in desc_sddl)
        self.assertTrue("(D;;CC;;;LG)" in desc_sddl)

    def test_306(self):
        """ Modify a descriptor with 0xF (all four parts) set.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        self.sd_utils.modify_sd_on_dn(ou_dn, self.test_descr, controls=["sd_flags:1:15"])
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn)
        # make sure we have modified the SACL
        self.assertTrue("(OU;;WP;;;AU)" in desc_sddl)
        # make sure everything else has been modified as well
        self.assertTrue("O:AU" in desc_sddl)
        self.assertTrue("G:AU" in desc_sddl)
        self.assertTrue("(D;;CC;;;LG)" in desc_sddl)

    def test_307(self):
        """ Read a descriptor with OWNER_SECURITY_INFORMATION
        Only the owner part should be returned.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn, controls=["sd_flags:1:%d" % (SECINFO_OWNER)])
        # make sure we have read the owner
        self.assertTrue("O:" in desc_sddl)
        # make sure we have read nothing else
        self.assertFalse("G:" in desc_sddl)
        self.assertFalse("D:" in desc_sddl)
        self.assertFalse("S:" in desc_sddl)

    def test_308(self):
        """ Read a descriptor with GROUP_SECURITY_INFORMATION
        Only the group part should be returned.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn, controls=["sd_flags:1:%d" % (SECINFO_GROUP)])
        # make sure we have read the group
        self.assertTrue("G:" in desc_sddl)
        # make sure we have read nothing else
        self.assertFalse("O:" in desc_sddl)
        self.assertFalse("D:" in desc_sddl)
        self.assertFalse("S:" in desc_sddl)

    def test_309(self):
        """ Read a descriptor with SACL_SECURITY_INFORMATION
        Only the sacl part should be returned.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn, controls=["sd_flags:1:%d" % (SECINFO_SACL)])
        # make sure we have read the SACL
        self.assertTrue("S:" in desc_sddl)
        # make sure we have read nothing else
        self.assertFalse("O:" in desc_sddl)
        self.assertFalse("D:" in desc_sddl)
        self.assertFalse("G:" in desc_sddl)

    def test_310(self):
        """ Read a descriptor with DACL_SECURITY_INFORMATION
        Only the dacl part should be returned.
        """
        ou_dn = "OU=test_sdflags_ou," + self.base_dn
        self.ldb_admin.create_ou(ou_dn)
        desc_sddl = self.sd_utils.get_sd_as_sddl(ou_dn, controls=["sd_flags:1:%d" % (SECINFO_DACL)])
        # make sure we have read the DACL
        self.assertTrue("D:" in desc_sddl)
        # make sure we have read nothing else
        self.assertFalse("O:" in desc_sddl)
        self.assertFalse("S:" in desc_sddl)
        self.assertFalse("G:" in desc_sddl)

    def test_311(self):
        """ Check in which combinations of requested attributes and
        sd_flags control the nTSecurityDescriptor attribute is returned
        by a search, and that all four SD parts are present when it is.
        """
        sd_flags = (SECINFO_OWNER |
                    SECINFO_GROUP |
                    SECINFO_DACL |
                    SECINFO_SACL)
        # no attrs, no control -> SD not returned
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    [], controls=None)
        self.assertFalse("nTSecurityDescriptor" in res[0])
        # explicit unrelated attr, no control -> SD not returned
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["name"], controls=None)
        self.assertFalse("nTSecurityDescriptor" in res[0])
        # unrelated attr with sd_flags -> still not returned
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["name"], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertFalse("nTSecurityDescriptor" in res[0])
        # empty attr list with sd_flags -> SD returned, all parts present
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    [], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        # "*" with sd_flags -> SD returned
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["*"], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        # explicit SD attr plus "*" (either order) -> SD returned
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["nTSecurityDescriptor", "*"], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["*", "nTSecurityDescriptor"], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        # explicit SD attr plus another attr (either order) -> SD returned
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["nTSecurityDescriptor", "name"], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["name", "nTSecurityDescriptor"], controls=["sd_flags:1:%d" % (sd_flags)])
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        # explicit SD attr without sd_flags control -> SD returned in full
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["nTSecurityDescriptor"], controls=None)
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["name", "nTSecurityDescriptor"], controls=None)
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None,
                                    ["nTSecurityDescriptor", "name"], controls=None)
        self.assertTrue("nTSecurityDescriptor" in res[0])
        tmp = res[0]["nTSecurityDescriptor"][0]
        sd = ndr_unpack(security.descriptor, tmp)
        sddl = sd.as_sddl(self.sd_utils.domain_sid)
        self.assertTrue("O:" in sddl)
        self.assertTrue("G:" in sddl)
        self.assertTrue("D:" in sddl)
        self.assertTrue("S:" in sddl)

    def test_312(self):
        """This search is done by the windows dc join..."""
        # "1.1" requests no attributes at all; the SD must not come back
        res = self.ldb_admin.search(self.base_dn, SCOPE_BASE, None, ["1.1"],
                                    controls=["extended_dn:1:0", "sd_flags:1:0", "search_options:1:1"])
        self.assertFalse("nTSecurityDescriptor" in res[0])
class RightsAttributesTests(DescriptorTests):
    """Tests for the constructed attributes that report what the connected
    user may do: sDRightsEffective, allowedChildClassesEffective and
    allowedAttributesEffective.
    """

    def deleteAll(self):
        # Clean up test users and the test OU (ignores missing objects)
        delete_force(self.ldb_admin, self.get_users_domain_dn("testuser_attr"))
        delete_force(self.ldb_admin, self.get_users_domain_dn("testuser_attr2"))
        delete_force(self.ldb_admin, "OU=test_domain_ou1," + self.base_dn)

    def setUp(self):
        super(RightsAttributesTests, self).setUp()
        self.deleteAll()
        ### Create users
        # User 1: unprivileged
        self.ldb_admin.newuser("testuser_attr", "samba123@")
        # User 2: made a member of Domain Admins
        self.ldb_admin.newuser("testuser_attr2", "samba123@")
        self.ldb_admin.add_remove_group_members("Domain Admins",
                                                ["testuser_attr2"],
                                                add_members_operation=True)

    def test_sDRightsEffective(self):
        """sDRightsEffective must reflect which parts of the security
        descriptor (owner/group/DACL/SACL bits) the connected user can
        write on the object.
        """
        object_dn = "OU=test_domain_ou1," + self.base_dn
        delete_force(self.ldb_admin, object_dn)
        self.ldb_admin.create_ou(object_dn)
        # (removed a leftover debug `print` of the user DN here)
        user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn("testuser_attr"))
        # give testuser1 read access so attributes can be retrieved
        mod = "(A;CI;RP;;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod)
        _ldb = self.get_ldb_connection("testuser_attr", "samba123@")
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["sDRightsEffective"])
        # user should have no rights at all
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["sDRightsEffective"][0], "0")
        # give the user Write DACL and see what happens
        mod = "(A;CI;WD;;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod)
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["sDRightsEffective"])
        # user should have DACL_SECURITY_INFORMATION
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["sDRightsEffective"][0], ("%d") % SECINFO_DACL)
        # give the user Write Owner and see what happens
        mod = "(A;CI;WO;;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod)
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["sDRightsEffective"])
        # user should have DACL_SECURITY_INFORMATION, OWNER_SECURITY_INFORMATION, GROUP_SECURITY_INFORMATION
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["sDRightsEffective"][0], ("%d") % (SECINFO_DACL | SECINFO_GROUP | SECINFO_OWNER))
        # there is no way to grant the SACL right by adding ACEs, so we use
        # a member of Domain Admins instead
        _ldb = self.get_ldb_connection("testuser_attr2", "samba123@")
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["sDRightsEffective"])
        # user should have DACL, OWNER, GROUP and SACL information rights
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["sDRightsEffective"][0],
                          ("%d") % (SECINFO_DACL | SECINFO_GROUP | SECINFO_OWNER | SECINFO_SACL))

    def test_allowedChildClassesEffective(self):
        """allowedChildClassesEffective must list only the object classes
        the connected user is allowed to create under the object.
        """
        object_dn = "OU=test_domain_ou1," + self.base_dn
        delete_force(self.ldb_admin, object_dn)
        self.ldb_admin.create_ou(object_dn)
        user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn("testuser_attr"))
        # give testuser1 read access so attributes can be retrieved
        mod = "(A;CI;RP;;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod)
        _ldb = self.get_ldb_connection("testuser_attr", "samba123@")
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["allowedChildClassesEffective"])
        # there should be no allowed child classes
        self.assertEquals(len(res), 1)
        self.assertFalse("allowedChildClassesEffective" in res[0].keys())
        # give the user the right to create children of type user
        # (bf967aba-... is the schemaIDGUID of the user class)
        mod = "(OA;CI;CC;bf967aba-0de6-11d0-a285-00aa003049e2;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod)
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["allowedChildClassesEffective"])
        # allowedChildClassesEffective should only have one value, user
        self.assertEquals(len(res), 1)
        self.assertEquals(len(res[0]["allowedChildClassesEffective"]), 1)
        self.assertEquals(res[0]["allowedChildClassesEffective"][0], "user")

    def test_allowedAttributesEffective(self):
        """allowedAttributesEffective must list only the writable (and
        actually modifiable) attributes for the connected user.
        """
        object_dn = "OU=test_domain_ou1," + self.base_dn
        delete_force(self.ldb_admin, object_dn)
        self.ldb_admin.create_ou(object_dn)
        user_sid = self.sd_utils.get_object_sid(self.get_users_domain_dn("testuser_attr"))
        # give testuser1 read access so attributes can be retrieved
        mod = "(A;CI;RP;;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod)
        _ldb = self.get_ldb_connection("testuser_attr", "samba123@")
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["allowedAttributesEffective"])
        # there should be no allowed attributes
        self.assertEquals(len(res), 1)
        self.assertFalse("allowedAttributesEffective" in res[0].keys())
        # give the user the right to write displayName and managedBy
        mod2 = "(OA;CI;WP;bf967953-0de6-11d0-a285-00aa003049e2;;%s)" % str(user_sid)
        mod = "(OA;CI;WP;0296c120-40da-11d1-a9c0-0000f80367c1;;%s)" % str(user_sid)
        # also rights to modify a read only attribute, fromEntry
        mod3 = "(OA;CI;WP;9a7ad949-ca53-11d1-bbd0-0080c76670c0;;%s)" % str(user_sid)
        self.sd_utils.dacl_add_ace(object_dn, mod + mod2 + mod3)
        res = _ldb.search(base=object_dn, expression="", scope=SCOPE_BASE,
                          attrs=["allowedAttributesEffective"])
        # value should only contain displayName and managedBy
        # (fromEntry is read-only and must not appear)
        self.assertEquals(len(res), 1)
        self.assertEquals(len(res[0]["allowedAttributesEffective"]), 2)
        self.assertTrue("displayName" in res[0]["allowedAttributesEffective"])
        self.assertTrue("managedBy" in res[0]["allowedAttributesEffective"])
class SdAutoInheritTests(DescriptorTests):
    """Tests for automatic inheritance: a change to a parent's DACL must
    be propagated to already-existing child objects.
    """

    def deleteAll(self):
        delete_force(self.ldb_admin, self.sub_dn)
        delete_force(self.ldb_admin, self.ou_dn)

    def setUp(self):
        super(SdAutoInheritTests, self).setUp()
        self.ou_dn = "OU=test_SdAutoInherit_ou," + self.base_dn
        self.sub_dn = "OU=test_sub," + self.ou_dn
        self.deleteAll()

    def test_301(self):
        """ Add a container-inherit ACE to the parent's DACL (using a
        DACL-only sd_flags control) and check that an inherited copy
        appears on the existing child, and that uSNChanged grows on the
        parent but stays the same on the child.
        """
        attrs = ["nTSecurityDescriptor", "replPropertyMetaData", "uSNChanged"]
        controls = ["sd_flags:1:%d" % (SECINFO_DACL)]
        ace = "(A;CI;CC;;;NU)"
        sub_ace = "(A;CIID;CC;;;NU)"
        sd_sddl = "O:BAG:BAD:P(A;CI;0x000f01ff;;;AU)"
        sd = security.descriptor.from_sddl(sd_sddl, self.domain_sid)
        self.ldb_admin.create_ou(self.ou_dn, sd=sd)
        self.ldb_admin.create_ou(self.sub_dn)
        # Snapshot the initial descriptors of parent and child
        ou_res0 = self.sd_utils.ldb.search(self.ou_dn, SCOPE_BASE,
                                           None, attrs, controls=controls)
        sub_res0 = self.sd_utils.ldb.search(self.sub_dn, SCOPE_BASE,
                                            None, attrs, controls=controls)
        ou_sd0 = ndr_unpack(security.descriptor, ou_res0[0]["nTSecurityDescriptor"][0])
        sub_sd0 = ndr_unpack(security.descriptor, sub_res0[0]["nTSecurityDescriptor"][0])
        ou_sddl0 = ou_sd0.as_sddl(self.domain_sid)
        sub_sddl0 = sub_sd0.as_sddl(self.domain_sid)
        # Neither object carries the new ACE yet
        self.assertFalse(ace in ou_sddl0)
        self.assertFalse(ace in sub_sddl0)
        # Splice the new ACE in front of the first existing ACE
        ou_sddl1 = (ou_sddl0[:ou_sddl0.index("(")] + ace +
                    ou_sddl0[ou_sddl0.index("("):])
        sub_sddl1 = (sub_sddl0[:sub_sddl0.index("(")] + ace +
                     sub_sddl0[sub_sddl0.index("("):])
        self.sd_utils.modify_sd_on_dn(self.ou_dn, ou_sddl1, controls=controls)
        # Re-read both descriptors after modifying only the parent
        sub_res2 = self.sd_utils.ldb.search(self.sub_dn, SCOPE_BASE,
                                            None, attrs, controls=controls)
        ou_res2 = self.sd_utils.ldb.search(self.ou_dn, SCOPE_BASE,
                                           None, attrs, controls=controls)
        ou_sd2 = ndr_unpack(security.descriptor, ou_res2[0]["nTSecurityDescriptor"][0])
        sub_sd2 = ndr_unpack(security.descriptor, sub_res2[0]["nTSecurityDescriptor"][0])
        ou_sddl2 = ou_sd2.as_sddl(self.domain_sid)
        sub_sddl2 = sub_sd2.as_sddl(self.domain_sid)
        self.assertFalse(ou_sddl2 == ou_sddl0)
        self.assertFalse(sub_sddl2 == sub_sddl0)
        # Debug output on failure (Python 2 print statements)
        if ace not in ou_sddl2:
            print "ou0: %s" % ou_sddl0
            print "ou2: %s" % ou_sddl2
        if sub_ace not in sub_sddl2:
            print "sub0: %s" % sub_sddl0
            print "sub2: %s" % sub_sddl2
        # Parent has the new ACE; child has the inherited (ID) copy
        self.assertTrue(ace in ou_sddl2)
        self.assertTrue(sub_ace in sub_sddl2)
        # The direct modification must bump the parent's USN ...
        ou_usn0 = int(ou_res0[0]["uSNChanged"][0])
        ou_usn2 = int(ou_res2[0]["uSNChanged"][0])
        self.assertTrue(ou_usn2 > ou_usn0)
        # ... while the child's inherited-SD update must not bump its USN
        sub_usn0 = int(sub_res0[0]["uSNChanged"][0])
        sub_usn2 = int(sub_res2[0]["uSNChanged"][0])
        self.assertTrue(sub_usn2 == sub_usn0)
# `host` is the target server passed to this test script (defined earlier,
# outside this excerpt - TODO confirm).  Bare names are turned into URLs:
# an existing local file is assumed to be a tdb database, anything else an
# LDAP server name.
if not "://" in host:
    if os.path.isfile(host):
        host = "tdb://%s" % host
    else:
        host = "ldap://%s" % host
# use 'paged_search' module when connecting remotely
if host.lower().startswith("ldap://"):
    ldb_options = ["modules:paged_searches"]
# `TestProgram` and `subunitopts` come from the test framework imported
# earlier in this file (outside this excerpt).
TestProgram(module=__name__, opts=subunitopts)
| gpl-3.0 |
PythonCharmers/python-future | src/future/backports/email/parser.py | 82 | 5312 | # Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: email-sig@python.org
"""A parser of RFC 2822 and MIME email messages."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser']
import warnings
from io import StringIO, TextIOWrapper
from future.backports.email.feedparser import FeedParser, BytesFeedParser
from future.backports.email.message import Message
from future.backports.email._policybase import compat32
class Parser(object):
    def __init__(self, _class=Message, **_3to2kwargs):
        """Build a parser for RFC 2822 and MIME email messages.

        The parser produces an in-memory message tree that can later be
        handed to a Generator to re-serialize.  Input must be a block of
        RFC 2822 headers and continuation lines, optionally preceded by a
        `Unix-from' header, terminated by a blank line or end of input.

        _class: factory used for new message objects; must be callable
        with no arguments (default: Message).
        policy: keyword-only (emulated via **kwargs for Python 2); an
        object controlling parser behaviour, default `compat32` for
        backward compatibility.
        """
        # pop() reproduces the original keyword-only-argument emulation:
        # take 'policy' out of the kwargs if present, else use compat32.
        policy = _3to2kwargs.pop('policy', compat32)
        self._class = _class
        self.policy = policy

    def parse(self, fp, headersonly=False):
        """Parse the open file *fp* and return the root Message object.

        When *headersonly* is true, parsing stops after the header block.
        """
        feed = FeedParser(self._class, policy=self.policy)
        if headersonly:
            feed._set_headersonly()
        chunk = fp.read(8192)
        while chunk:
            feed.feed(chunk)
            chunk = fp.read(8192)
        return feed.close()

    def parsestr(self, text, headersonly=False):
        """Parse the string *text*; see parse() for *headersonly*."""
        source = StringIO(text)
        return self.parse(source, headersonly=headersonly)
class HeaderParser(Parser):
    """A Parser that reads only the headers, leaving the body unparsed."""

    def parse(self, fp, headersonly=True):
        # Force headers-only parsing regardless of the flag passed in.
        return Parser.parse(self, fp, headersonly=True)

    def parsestr(self, text, headersonly=True):
        # Same forcing behaviour for string input.
        return Parser.parsestr(self, text, headersonly=True)
class BytesParser(object):
    def __init__(self, *args, **kw):
        """Parser of binary RFC 2822 and MIME email messages.

        Produces the same in-memory message tree as Parser, but accepts
        bytes input.  All constructor arguments are forwarded verbatim to
        the underlying Parser (see Parser.__init__ for details).
        """
        self.parser = Parser(*args, **kw)

    def parse(self, fp, headersonly=False):
        """Parse a binary file object and return the root of the tree.

        The stream is decoded as ASCII with the surrogateescape error
        handler so arbitrary bytes survive a round trip.  The wrapper
        (and therefore *fp*) is closed when parsing finishes.  When
        *headersonly* is true, parsing stops after the header block.
        """
        wrapper = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
        with wrapper:
            return self.parser.parse(wrapper, headersonly)

    def parsebytes(self, text, headersonly=False):
        """Parse a bytes object; see parse() for *headersonly*."""
        decoded = text.decode('ASCII', errors='surrogateescape')
        return self.parser.parsestr(decoded, headersonly)
class BytesHeaderParser(BytesParser):
    """A BytesParser that reads only the headers of a binary message."""

    def parse(self, fp, headersonly=True):
        # Always parse headers only, whatever the caller passes.
        return BytesParser.parse(self, fp, True)

    def parsebytes(self, text, headersonly=True):
        return BytesParser.parsebytes(self, text, True)
| mit |
dpac-vlsi/SynchroTrace | util/pbs/pbs.py | 90 | 6097 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os, popen2, re, sys
class MyPOpen(object):
    """Minimal popen-style process runner (Python 2 era, predates
    subprocess).

    Runs *cmd* (an argv list) in a forked child with stdin/stdout/stderr
    wired either to fresh pipes or to caller-supplied file objects,
    filenames or raw descriptors.  stderr is always merged into stdout.

    Attributes:
      tochild   - writable file to the child's stdin (set only when
                  input is None)
      fromchild - readable file from the child's stdout (set only when
                  output is None; NOTE(review): never set otherwise, so
                  callers that supply `output` must not touch it)
      pid       - child process id
      status    - raw waitpid status, -1 until the child is reaped
    """

    def __init__(self, cmd, input = None, output = None, bufsize = -1):
        # -1 marks "child not yet waited for"
        self.status = -1

        # --- stdin plumbing ---
        if input is None:
            # Fresh pipe: caller writes to self.tochild, child reads.
            p2c_read, p2c_write = os.pipe()
            self.tochild = os.fdopen(p2c_write, 'w', bufsize)
        else:
            p2c_write = None
            # `file` is the Python 2 built-in file type
            if isinstance(input, file):
                p2c_read = input.fileno()
            elif isinstance(input, str):
                # A string is taken to be a filename to read from
                input = file(input, 'r')
                p2c_read = input.fileno()
            elif isinstance(input, int):
                # Already a raw file descriptor
                p2c_read = input
            else:
                raise AttributeError

        # --- stdout/stderr plumbing (stderr merged into stdout) ---
        if output is None:
            c2p_read, c2p_write = os.pipe()
            self.fromchild = os.fdopen(c2p_read, 'r', bufsize)
        else:
            c2p_read = None
            if isinstance(output, file):
                c2p_write = output.fileno()
            elif isinstance(output, str):
                output = file(output, 'w')
                c2p_write = output.fileno()
            elif isinstance(output, int):
                c2p_write = output
            else:
                raise AttributeError

        self.pid = os.fork()
        if self.pid == 0:
            # Child: redirect stdio onto the chosen fds and exec.
            os.dup2(p2c_read, sys.stdin.fileno())
            os.dup2(c2p_write, sys.stdout.fileno())
            os.dup2(c2p_write, sys.stderr.fileno())
            try:
                os.execvp(cmd[0], cmd)
            finally:
                # Reached only if execvp fails: never fall back into the
                # parent's Python code inside the child.
                os._exit(1)

        # Parent: close the child-side descriptors.
        # NOTE(review): when input/output descriptors were supplied by the
        # caller, this closes the *caller's* fds as a side effect - confirm
        # callers expect that.
        os.close(p2c_read)
        os.close(c2p_write)

    def poll(self):
        # Non-blocking check; returns the raw status, or -1 while running.
        if self.status < 0:
            pid, status = os.waitpid(self.pid, os.WNOHANG)
            if pid == self.pid:
                self.status = status
        return self.status

    def wait(self):
        # Block until the child exits; returns the raw waitpid status.
        if self.status < 0:
            pid, status = os.waitpid(self.pid, 0)
            if pid == self.pid:
                self.status = status
        return self.status
class qsub:
    """Builds and submits a PBS `qsub` command line.

    Set the public attributes (hold, join, name, queue, ...) as needed,
    then call build(script, args) followed by do().  do() falls back to
    submitting over ssh to `pbshost` when local submission fails.
    """

    def __init__(self):
        self.afterok = None        # job id for a -Wdepend=afterok: dependency
        self.hold = False          # submit the job in the held state (-h)
        self.join = False          # merge stderr into stdout (-joe)
        self.keep_stdout = False   # keep stdout on the execution host (-k)
        self.keep_stderr = False   # keep stderr on the execution host (-k)
        self.node_type = None      # -lnodes= resource request
        self.mail_abort = False    # mail on abort (-m a)
        self.mail_begin = False    # mail on begin (-m b)
        self.mail_end = False      # mail on end (-m e)
        self.name = None           # job name (-N)
        self.stdout = None         # stdout destination path (-o)
        self.priority = None       # job priority string (-p)
        self.queue = None          # destination queue (-q)
        self.pbshost = None        # host to ssh to if local qsub fails
        self.qsub = 'qsub'         # qsub executable name
        self.env = {}              # environment variables to export (-v)

    def build(self, script, args=None):
        """Assemble the qsub argv for *script* into self.cmd and the
        printable self.command.

        args: extra qsub arguments appended verbatim (default: none).
        """
        # Use None instead of a shared mutable default list.
        if args is None:
            args = []
        self.cmd = [ self.qsub ]
        if self.env:
            # -vVAR=value[,VAR=value...] exports variables to the job.
            # .items() (not .iteritems()) keeps this Py2/Py3 compatible.
            arg = '-v'
            arg += ','.join([ '%s=%s' % i for i in self.env.items() ])
            self.cmd.append(arg)
        if self.hold:
            self.cmd.append('-h')
        if self.stdout:
            self.cmd.append('-olocalhost:' + self.stdout)
        # -k selects which output streams are kept on the execution host
        if self.keep_stdout and self.keep_stderr:
            self.cmd.append('-koe')
        elif self.keep_stdout:
            self.cmd.append('-ko')
        elif self.keep_stderr:
            self.cmd.append('-ke')
        else:
            self.cmd.append('-kn')
        if self.join:
            self.cmd.append('-joe')
        if self.node_type:
            self.cmd.append('-lnodes=' + self.node_type)
        if self.mail_abort or self.mail_begin or self.mail_end:
            # BUG FIX: this used to call flags.append(...) on a str, which
            # raises AttributeError at runtime; build the string with +=.
            flags = ''
            if self.mail_abort:
                flags += 'a'
            if self.mail_begin:
                flags += 'b'
            if self.mail_end:
                flags += 'e'
            if flags:
                self.cmd.append('-m ' + flags)
        else:
            self.cmd.append('-mn')
        if self.name:
            self.cmd.append("-N%s" % self.name)
        if self.priority:
            self.cmd.append('-p' + self.priority)
        if self.queue:
            self.cmd.append('-q' + self.queue)
        if self.afterok:
            self.cmd.append('-Wdepend=afterok:%s' % self.afterok)
        self.cmd.extend(args)
        self.script = script
        self.command = ' '.join(self.cmd + [ self.script ])

    def do(self):
        """Run the built command; on failure, retry over ssh via pbshost.

        Returns the exit status; raw submission output is kept in
        self.result.
        """
        pbs = MyPOpen(self.cmd + [ self.script ])
        self.result = pbs.fromchild.read()
        ec = pbs.wait()
        if ec != 0 and self.pbshost:
            # Re-submit remotely; '-' makes qsub read the script on stdin.
            cmd = ' '.join(self.cmd + [ '-' ])
            cmd = [ 'ssh', '-x', self.pbshost, cmd ]
            self.command = ' '.join(cmd)
            ssh = MyPOpen(cmd, input = self.script)
            self.result = ssh.fromchild.read()
            ec = ssh.wait()
        return ec
| bsd-3-clause |
heathseals/CouchPotatoServer | libs/xmpp/__init__.py | 212 | 1795 | # $Id: __init__.py,v 1.9 2005/03/07 09:34:51 snakeru Exp $
"""
All features of xmpppy library contained within separate modules.
At present there are modules:
simplexml - XML handling routines
protocol - jabber-objects (I.e. JID and different stanzas and sub-stanzas) handling routines.
debug - Jacob Lundquist's debugging module. Very handy if you like colored debug.
auth - Non-SASL and SASL stuff. You will need it to auth as a client or transport.
transports - low level connection handling. TCP and TLS currently. HTTP support planned.
roster - simple roster for use in clients.
dispatcher - decision-making logic. Handles all hooks. The first who takes control over fresh stanzas.
features - different stuff that didn't worths separating into modules
browser - DISCO server framework. Allows to build dynamic disco tree.
filetransfer - Currently contains only IBB stuff. Can be used for bot-to-bot transfers.
Most of the classes defined in these modules are descendants of the PlugIn
class, so they share a single set of methods that lets you assemble a
fully-featured XMPP client. For every PlugIn instance the 'owner' is the class
into which the plug was plugged. Plugging in such an instance usually sets some
of the owner's methods to its own ones for easy access. All session-specific
info is stored either in the PlugIn instance or in the owner's instance. This
is considered unhandy, and there are plans to port the 'Session' class from the
xmppd.py project for storing all session-related info. As long as you do not
access instance variables directly and use only methods to access all values,
you should not have any problems.
"""
import simplexml,protocol,debug,auth,transports,roster,dispatcher,features,browser,filetransfer,commands
from client import *
from protocol import *
| gpl-3.0 |
dcramer/django-compositepks | django/contrib/gis/gdal/geomtype.py | 7 | 2470 | from django.contrib.gis.gdal.error import OGRException
#### OGRGeomType ####
class OGRGeomType(object):
    "Encapsulates an OGR geometry type."

    # Mapping of acceptable OGRwkbGeometryType numbers to their string names.
    _types = {0: 'Unknown',
              1: 'Point',
              2: 'LineString',
              3: 'Polygon',
              4: 'MultiPoint',
              5: 'MultiLineString',
              6: 'MultiPolygon',
              7: 'GeometryCollection',
              100: 'None',
              101: 'LinearRing',
              }

    # Reverse lookup table, keyed by the lower-cased type name.
    _str_types = dict((name.lower(), num) for num, name in _types.items())

    def __init__(self, type_input):
        "Resolve the input (instance, name string, or integer) to a type number."
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, basestring):
            num = self._str_types.get(type_input.lower(), None)
            if num is None:
                raise OGRException('Invalid OGR String Type "%s"' % type_input)
        elif isinstance(type_input, int):
            if not type_input in self._types:
                raise OGRException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')
        # Store the resolved OGR geometry type number.
        self.num = num

    def __str__(self):
        "Stringify as the geometry type's name."
        return self.name

    def __eq__(self, other):
        """
        Equality against another OGRGeomType, the short-hand type-name
        string, or the integer type code.
        """
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        elif isinstance(other, basestring):
            return self.name.lower() == other.lower()
        elif isinstance(other, int):
            return self.num == other
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def name(self):
        "The short-hand string name for this geometry type."
        return self._types[self.num]

    @property
    def django(self):
        "The Django GeometryField class name for this type, or None."
        type_name = self.name
        if type_name in ('Unknown', 'LinearRing', 'None'):
            return None
        return type_name + 'Field'
| bsd-3-clause |
shhui/horizon | horizon/browsers/views.py | 9 | 2027 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon.tables import MultiTableView # noqa
from horizon.utils import memoized
class ResourceBrowserView(MultiTableView):
    # Subclasses must point this at a ResourceBrowser subclass.
    browser_class = None

    def __init__(self, *args, **kwargs):
        if not self.browser_class:
            raise ValueError("You must specify a ResourceBrowser subclass "
                             "for the browser_class attribute on %s."
                             % self.__class__.__name__)
        browser_cls = self.browser_class
        # Expose both browser tables through the MultiTableView machinery.
        self.table_classes = (browser_cls.navigation_table_class,
                              browser_cls.content_table_class)
        self.navigation_selection = False
        super(ResourceBrowserView, self).__init__(*args, **kwargs)

    @memoized.memoized_method
    def get_browser(self):
        # Construct the browser once per request (memoized) and attach
        # the tables built by this view.
        browser = self.browser_class(self.request, **self.kwargs)
        browser.set_tables(self.get_tables())
        if not self.navigation_selection:
            content_table = browser.content_table
            nav_item = browser.navigable_item_name.lower()
            content_table._no_data_message = (_("Select a %s to browse.")
                                              % nav_item)
        return browser

    def get_context_data(self, **kwargs):
        context = super(ResourceBrowserView, self).get_context_data(**kwargs)
        browser = self.get_browser()
        context["%s_browser" % browser.name] = browser
        return context
gofed/gofed-ng | common/helpers/file.py | 1 | 1290 | #!/bin/python
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, fpokorny@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import hashlib
_BLOCKSIZE = 65535
def file_hash(path, blocksize=None):
    """Return the SHA-1 hex digest of the file at *path*.

    The file is read in chunks of *blocksize* bytes (default: the module
    level _BLOCKSIZE) so arbitrarily large files are hashed without being
    loaded into memory.  The *blocksize* parameter is new and optional, so
    existing callers are unaffected.
    """
    if blocksize is None:
        blocksize = _BLOCKSIZE
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        # iter() with a sentinel stops cleanly at EOF (read() returns b'').
        for chunk in iter(lambda: f.read(blocksize), b''):
            digest.update(chunk)
    return digest.hexdigest()
def blob_hash(blob):
    """Return the SHA-1 hex digest of an in-memory blob."""
    # Feeding the data through the constructor is equivalent to one update().
    return hashlib.sha1(blob).hexdigest()
if __name__ == "__main__":
    # This module is a library of hashing helpers; running it directly is
    # not supported, so exit with a non-zero status.
    sys.exit(1)
| gpl-3.0 |
socam/commhistory-daemon | tools/xincludator.py | 30 | 1291 | #!/usr/bin/python
from sys import argv, stdout, stderr
import codecs, locale
import os
import xml.dom.minidom
stdout = codecs.getwriter('utf-8')(stdout)
NS_XI = 'http://www.w3.org/2001/XInclude'

def xincludate(dom, base, dropns=None):
    """Recursively inline XInclude references into *dom* in place.

    *base* is the path of the document being processed (used to resolve
    relative hrefs).  *dropns* accumulates xmlns prefixes already declared
    by an ancestor document so nested includes do not redeclare them.
    """
    # BUG FIX: the original used a mutable default (dropns=[]), which is
    # shared between top-level calls and leaks prefixes from one document
    # into the next; create a fresh list per call instead.
    if dropns is None:
        dropns = []
    remove_attrs = []
    # range() works on both Python 2 and 3 (xrange is 2-only).
    for i in range(dom.documentElement.attributes.length):
        attr = dom.documentElement.attributes.item(i)
        if attr.prefix == 'xmlns':
            if attr.localName in dropns:
                # Namespace already declared further up; drop the duplicate.
                remove_attrs.append(attr)
            else:
                dropns.append(attr.localName)
    for attr in remove_attrs:
        dom.documentElement.removeAttributeNode(attr)
    for include in dom.getElementsByTagNameNS(NS_XI, 'include'):
        href = include.getAttribute('href')
        # FIXME: assumes Unixy paths
        filename = os.path.join(os.path.dirname(base), href)
        subdom = xml.dom.minidom.parse(filename)
        xincludate(subdom, filename, dropns)
        if './' in href:
            subdom.documentElement.setAttribute('xml:base', href)
        include.parentNode.replaceChild(subdom.documentElement, include)
if __name__ == '__main__':
    # Expand XIncludes in the document named on the command line and write
    # the result to the UTF-8-wrapped stdout set up above.
    argv = argv[1:]
    dom = xml.dom.minidom.parse(argv[0])
    xincludate(dom, argv[0])
    # BUG FIX: the original bound this to the name 'xml', shadowing the
    # imported xml package; use a distinct name.
    serialized = dom.toxml()
    stdout.write(serialized)
    stdout.write('\n')
VirtueSecurity/aws-extender | BappModules/boto/emr/emrobject.py | 136 | 12903 | # Copyright (c) 2010 Spotify AB
# Copyright (c) 2010 Jeremy Thurgood <firxen+boto@gmail.com>
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module contains EMR response objects
"""
from boto.resultset import ResultSet
class EmrObject(object):
    # Element names listed here are captured by endElement and stored as
    # lower-cased attributes (e.g. 'JobFlowId' -> self.jobflowid).
    Fields = set()

    def __init__(self, connection=None):
        self.connection = connection

    def startElement(self, name, attrs, connection):
        # Default: no nested structure to descend into.
        pass

    def endElement(self, name, value, connection):
        # Record recognised simple fields as attributes.
        if name in self.Fields:
            setattr(self, name.lower(), value)
class RunJobFlowResponse(EmrObject):
    """Response to a RunJobFlow call; exposes ``jobflowid``."""
    Fields = set(['JobFlowId'])
class AddInstanceGroupsResponse(EmrObject):
    """Response to AddInstanceGroups; exposes ``instancegroupids`` and ``jobflowid``."""
    Fields = set(['InstanceGroupIds', 'JobFlowId'])
class ModifyInstanceGroupsResponse(EmrObject):
    """Response to ModifyInstanceGroups; exposes ``requestid``."""
    Fields = set(['RequestId'])
class Arg(EmrObject):
    """A single argument value parsed from a response element.

    Unlike field-based EmrObjects, the element's text is stored in
    ``value`` regardless of the element name.
    """
    def __init__(self, connection=None):
        # Keep the base-class contract: every EmrObject records its
        # connection (the original override silently dropped it).
        super(Arg, self).__init__(connection)
        self.value = None

    def endElement(self, name, value, connection):
        self.value = value
class StepId(Arg):
    """A single step id (element text stored in ``value``)."""
    pass
class SupportedProduct(Arg):
    """A supported-product name (element text stored in ``value``)."""
    pass
class JobFlowStepList(EmrObject):
    """List of step ids returned when steps are added to a job flow."""
    def __init__(self, connection=None):
        # BUG FIX: this method was misspelled ``__ini__`` and therefore
        # never ran as the constructor, leaving ``stepids`` (and
        # ``connection``) undefined until startElement happened to be
        # called with 'StepIds'.
        self.connection = connection
        self.stepids = None

    def startElement(self, name, attrs, connection):
        # Descend into <StepIds><member>...</member></StepIds>.
        if name == 'StepIds':
            self.stepids = ResultSet([('member', StepId)])
            return self.stepids
        else:
            return None
class BootstrapAction(EmrObject):
    """A bootstrap action attached to a job flow."""
    Fields = set([
        'Args',
        'Name',
        'Path',
        'ScriptPath',
    ])

    def startElement(self, name, attrs, connection):
        # Collect <Args><member>...</member></Args> into a ResultSet.
        # NOTE(review): ``self.args`` only exists after an Args element is
        # seen (no __init__ default here) — confirm callers handle that.
        if name == 'Args':
            self.args = ResultSet([('member', Arg)])
            return self.args
class KeyValue(EmrObject):
    """A generic Key/Value pair (used for properties and tags)."""
    Fields = set([
        'Key',
        'Value',
    ])
class Step(EmrObject):
    """A step (Hadoop job) within a job flow description."""
    Fields = set([
        'ActionOnFailure',
        'CreationDateTime',
        'EndDateTime',
        'Jar',
        'LastStateChangeReason',
        'MainClass',
        'Name',
        'StartDateTime',
        'State',
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.args = None

    def startElement(self, name, attrs, connection):
        # Route list-valued children into typed ResultSets; anything else
        # falls through (implicit None) and is handled by endElement.
        if name == 'Args':
            self.args = ResultSet([('member', Arg)])
            return self.args
        if name == 'Properties':
            self.properties = ResultSet([('member', KeyValue)])
            return self.properties
class InstanceGroup(EmrObject):
    """An instance group within a job flow (DescribeJobFlows shape)."""
    Fields = set([
        'BidPrice',
        'CreationDateTime',
        'EndDateTime',
        'InstanceGroupId',
        'InstanceRequestCount',
        'InstanceRole',
        'InstanceRunningCount',
        'InstanceType',
        'LastStateChangeReason',
        'LaunchGroup',
        'Market',
        'Name',
        'ReadyDateTime',
        'StartDateTime',
        'State',
    ])
class JobFlow(EmrObject):
    """A job flow description as returned by DescribeJobFlows."""
    Fields = set([
        'AmiVersion',
        'AvailabilityZone',
        'CreationDateTime',
        'Ec2KeyName',
        'EndDateTime',
        'HadoopVersion',
        'Id',
        'InstanceCount',
        'JobFlowId',
        'KeepJobFlowAliveWhenNoSteps',
        'LastStateChangeReason',
        'LogUri',
        'MasterInstanceId',
        'MasterInstanceType',
        'MasterPublicDnsName',
        'Name',
        'NormalizedInstanceHours',
        'ReadyDateTime',
        'RequestId',
        'SlaveInstanceType',
        'StartDateTime',
        'State',
        'TerminationProtected',
        'Type',
        'Value',
        'VisibleToAllUsers',
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.steps = None
        self.instancegroups = None
        self.bootstrapactions = None

    def startElement(self, name, attrs, connection):
        # Route list-valued children into typed ResultSets.
        if name == 'Steps':
            self.steps = ResultSet([('member', Step)])
            return self.steps
        elif name == 'InstanceGroups':
            self.instancegroups = ResultSet([('member', InstanceGroup)])
            return self.instancegroups
        elif name == 'BootstrapActions':
            self.bootstrapactions = ResultSet([('member', BootstrapAction)])
            return self.bootstrapactions
        elif name == 'SupportedProducts':
            self.supported_products = ResultSet([('member', SupportedProduct)])
            return self.supported_products
        else:
            return None
class ClusterTimeline(EmrObject):
    """Creation/ready/end timestamps of a cluster, group, step or instance."""
    Fields = set([
        'CreationDateTime',
        'ReadyDateTime',
        'EndDateTime'
    ])
class ClusterStateChangeReason(EmrObject):
    """Code/message pair explaining a state change."""
    Fields = set([
        'Code',
        'Message'
    ])
class ClusterStatus(EmrObject):
    """State, state-change reason and timeline — shared by cluster,
    instance-group, instance and step descriptions."""
    Fields = set([
        'State',
        'StateChangeReason',
        'Timeline'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.timeline = None

    def startElement(self, name, attrs, connection):
        # NOTE(review): ``statechangereason`` has no __init__ default and
        # only exists once the element is seen — confirm callers check.
        if name == 'Timeline':
            self.timeline = ClusterTimeline()
            return self.timeline
        elif name == 'StateChangeReason':
            self.statechangereason = ClusterStateChangeReason()
            return self.statechangereason
        else:
            return None
class Ec2InstanceAttributes(EmrObject):
    """EC2-related cluster attributes (key pair, subnet, AZ, instance profile)."""
    Fields = set([
        'Ec2KeyName',
        'Ec2SubnetId',
        'Ec2AvailabilityZone',
        'IamInstanceProfile'
    ])
class Application(EmrObject):
    """An application installed on the cluster (name/version/args/info)."""
    Fields = set([
        'Name',
        'Version',
        'Args',
        'AdditionalInfo'
    ])
class Cluster(EmrObject):
    """A cluster description as returned by DescribeCluster."""
    Fields = set([
        'Id',
        'Name',
        'LogUri',
        'RequestedAmiVersion',
        'RunningAmiVersion',
        'AutoTerminate',
        'TerminationProtected',
        'VisibleToAllUsers',
        'MasterPublicDnsName',
        'NormalizedInstanceHours',
        'ServiceRole'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.status = None
        self.ec2instanceattributes = None
        self.applications = None
        self.tags = None

    def startElement(self, name, attrs, connection):
        # Route nested structures into their dedicated parse objects.
        if name == 'Status':
            self.status = ClusterStatus()
            return self.status
        elif name == 'Ec2InstanceAttributes':
            self.ec2instanceattributes = Ec2InstanceAttributes()
            return self.ec2instanceattributes
        elif name == 'Applications':
            self.applications = ResultSet([('member', Application)])
            return self.applications
        elif name == 'Tags':
            self.tags = ResultSet([('member', KeyValue)])
            return self.tags
        else:
            return None
class ClusterSummary(EmrObject):
    """Summary entry for one cluster as returned by ListClusters."""
    Fields = set([
        'Id',
        'Name',
        'NormalizedInstanceHours'
    ])

    def __init__(self, connection=None):
        # CONSISTENCY FIX: give ``connection`` a default like every other
        # EmrObject subclass in this module (it was a required positional
        # argument only here); existing callers are unaffected.
        self.connection = connection
        self.status = None

    def startElement(self, name, attrs, connection):
        if name == 'Status':
            self.status = ClusterStatus()
            return self.status
        else:
            return None
class ClusterSummaryList(EmrObject):
    """Paged list of ClusterSummary objects (ListClusters response)."""
    Fields = set([
        'Marker'
    ])

    def __init__(self, connection=None):
        # CONSISTENCY FIX: default ``connection`` like the other list
        # wrappers in this module (it was a required positional argument
        # only here); existing callers are unaffected.
        self.connection = connection
        self.clusters = None

    def startElement(self, name, attrs, connection):
        if name == 'Clusters':
            self.clusters = ResultSet([('member', ClusterSummary)])
            return self.clusters
        else:
            return None
class StepConfig(EmrObject):
    """Configuration (jar, main class, args, properties) of a cluster step."""
    Fields = set([
        'Jar',
        'MainClass'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.properties = None
        self.args = None

    def startElement(self, name, attrs, connection):
        if name == 'Properties':
            self.properties = ResultSet([('member', KeyValue)])
            return self.properties
        elif name == 'Args':
            self.args = ResultSet([('member', Arg)])
            return self.args
        else:
            return None
class HadoopStep(EmrObject):
    """A step description as returned by DescribeStep."""
    Fields = set([
        'Id',
        'Name',
        'ActionOnFailure'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.config = None
        self.status = None

    def startElement(self, name, attrs, connection):
        if name == 'Config':
            self.config = StepConfig()
            return self.config
        elif name == 'Status':
            self.status = ClusterStatus()
            return self.status
        else:
            return None
class InstanceGroupInfo(EmrObject):
    """An instance group entry as returned by ListInstanceGroups."""
    Fields = set([
        'Id',
        'Name',
        'Market',
        'InstanceGroupType',
        'BidPrice',
        'InstanceType',
        'RequestedInstanceCount',
        'RunningInstanceCount'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.status = None

    def startElement(self, name, attrs, connection):
        if name == 'Status':
            self.status = ClusterStatus()
            return self.status
        else:
            return None
class InstanceGroupList(EmrObject):
    """Paged list of InstanceGroupInfo objects (ListInstanceGroups)."""
    Fields = set([
        'Marker'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.instancegroups = None

    def startElement(self, name, attrs, connection):
        if name == 'InstanceGroups':
            self.instancegroups = ResultSet([('member', InstanceGroupInfo)])
            return self.instancegroups
        else:
            return None
class InstanceInfo(EmrObject):
    """A cluster instance entry as returned by ListInstances."""
    Fields = set([
        'Id',
        'Ec2InstanceId',
        'PublicDnsName',
        'PublicIpAddress',
        'PrivateDnsName',
        'PrivateIpAddress'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.status = None

    def startElement(self, name, attrs, connection):
        if name == 'Status':
            self.status = ClusterStatus()
            return self.status
        else:
            return None
class InstanceList(EmrObject):
    """Paged list of InstanceInfo objects (ListInstances)."""
    Fields = set([
        'Marker'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.instances = None

    def startElement(self, name, attrs, connection):
        if name == 'Instances':
            self.instances = ResultSet([('member', InstanceInfo)])
            return self.instances
        else:
            return None
class StepSummary(EmrObject):
    """A step entry as returned by ListSteps."""
    Fields = set([
        'Id',
        'Name'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.status = None
        self.config = None

    def startElement(self, name, attrs, connection):
        if name == 'Status':
            self.status = ClusterStatus()
            return self.status
        elif name == 'Config':
            self.config = StepConfig()
            return self.config
        else:
            return None
class StepSummaryList(EmrObject):
    """Paged list of StepSummary objects (ListSteps)."""
    Fields = set([
        'Marker'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.steps = None

    def startElement(self, name, attrs, connection):
        if name == 'Steps':
            self.steps = ResultSet([('member', StepSummary)])
            return self.steps
        else:
            return None
class BootstrapActionList(EmrObject):
    """Paged list of BootstrapAction objects (ListBootstrapActions)."""
    Fields = set([
        'Marker'
    ])

    def __init__(self, connection=None):
        self.connection = connection
        self.actions = None

    def startElement(self, name, attrs, connection):
        if name == 'BootstrapActions':
            self.actions = ResultSet([('member', BootstrapAction)])
            return self.actions
        else:
            return None
| mit |
caioserra/apiAdwords | examples/adspygoogle/dfp/v201302/get_team.py | 3 | 1511 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets a team by its ID.
To determine which teams exist, run get_all_teams.py.
Tags: TeamService.getTeam
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'

# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))

# Import appropriate classes from the client library.
from adspygoogle import DfpClient

# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))

# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v201302')

# Set the ID of the team to get.
# NOTE(review): replace this placeholder with a real numeric team id
# (run get_all_teams.py to list them) before executing this example.
team_id = 'INSERT_TEAM_ID_HERE'

# Get team.  GetTeam returns a list; the first element is the team.
team = team_service.GetTeam(team_id)[0]

# Display results.
print ('Team with ID \'%s\' and name \'%s\' was found.'
       % (team['id'], team['name']))
| apache-2.0 |
jameschch/Lean | Algorithm.Python/Alphas/GlobalEquityMeanReversionIBSAlpha.py | 3 | 5567 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
from System import *
from QuantConnect import *
from QuantConnect.Data.UniverseSelection import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Portfolio import EqualWeightingPortfolioConstructionModel
from QuantConnect.Algorithm.Framework.Selection import ManualUniverseSelectionModel
#
# Equity indices exhibit mean reversion in daily returns. The Internal Bar Strength indicator (IBS),
# which relates the closing price of a security to its daily range can be used to identify overbought
# and oversold securities.
#
# This alpha ranks 33 global equity ETFs on its IBS value the previous day and predicts for the following day
# that the ETF with the highest IBS value will decrease in price, and the ETF with the lowest IBS value
# will increase in price.
#
# Source: Kakushadze, Zura, and Juan Andrés Serur. “4. Exchange-Traded Funds (ETFs).” 151 Trading Strategies, Palgrave Macmillan, 2018, pp. 90–91.
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open sourced so the community and client funds can see an example of an alpha.
#
class GlobalEquityMeanReversionIBSAlpha(QCAlgorithm):
    """Framework algorithm: daily mean reversion on global equity ETFs
    ranked by Internal Bar Strength (IBS)."""

    def Initialize(self):
        """Configure dates, cash, fee model, universe and framework models."""
        self.SetStartDate(2018, 1, 1)
        self.SetCash(100000)

        # Set zero transaction fees
        self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))

        # Global Equity ETF tickers
        # NOTE(review): "EWM" appears three times in this list — confirm
        # whether the duplicates are intentional.
        tickers = ["ECH","EEM","EFA","EPHE","EPP","EWA","EWC","EWG",
                   "EWH","EWI","EWJ","EWL","EWM","EWM","EWO","EWP",
                   "EWQ","EWS","EWT","EWU","EWY","EWZ","EZA","FXI",
                   "GXG","IDX","ILF","EWM","QQQ","RSX","SPY","THD"]

        symbols = [Symbol.Create(ticker, SecurityType.Equity, Market.USA) for ticker in tickers]

        # Manually curated universe
        self.UniverseSettings.Resolution = Resolution.Daily
        self.SetUniverseSelection(ManualUniverseSelectionModel(symbols))

        # Use GlobalEquityMeanReversionAlphaModel to establish insights
        self.SetAlpha(MeanReversionIBSAlphaModel())

        # Equally weigh securities in portfolio, based on insights
        self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())

        # Set Immediate Execution Model
        self.SetExecution(ImmediateExecutionModel())

        # Set Null Risk Management Model
        self.SetRiskManagement(NullRiskManagementModel())
class MeanReversionIBSAlphaModel(AlphaModel):
    '''Uses ranking of Internal Bar Strength (IBS) to create direction prediction for insights'''

    def __init__(self, *args, **kwargs):
        # lookback (number of bars) and resolution together determine how
        # long each emitted insight remains valid.
        lookback = kwargs['lookback'] if 'lookback' in kwargs else 1
        resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Daily
        self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(resolution), lookback)
        # Number of ETFs taken on each side of the IBS ranking.
        self.numberOfStocks = kwargs['numberOfStocks'] if 'numberOfStocks' in kwargs else 2
def Update(self, algorithm, data):
insights = []
symbolsIBS = dict()
returns = dict()
for security in algorithm.ActiveSecurities.Values:
if security.HasData:
high = security.High
low = security.Low
hilo = high - low
# Do not consider symbol with zero open and avoid division by zero
if security.Open * hilo != 0:
# Internal bar strength (IBS)
symbolsIBS[security.Symbol] = (security.Close - low)/hilo
returns[security.Symbol] = security.Close/security.Open-1
# Number of stocks cannot be higher than half of symbolsIBS length
number_of_stocks = min(int(len(symbolsIBS)/2), self.numberOfStocks)
if number_of_stocks == 0:
return []
# Rank securities with the highest IBS value
ordered = sorted(symbolsIBS.items(), key=lambda kv: (round(kv[1], 6), kv[0]), reverse=True)
highIBS = dict(ordered[0:number_of_stocks]) # Get highest IBS
lowIBS = dict(ordered[-number_of_stocks:]) # Get lowest IBS
# Emit "down" insight for the securities with the highest IBS value
for key,value in highIBS.items():
insights.append(Insight.Price(key, self.predictionInterval, InsightDirection.Down, abs(returns[key]), None))
# Emit "up" insight for the securities with the lowest IBS value
for key,value in lowIBS.items():
insights.append(Insight.Price(key, self.predictionInterval, InsightDirection.Up, abs(returns[key]), None))
return insights | apache-2.0 |
nagnath006/Soccer-Analytics | Soccer-Analytics/Lib/encodings/cp1026.py | 593 | 13369 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec backed by the gencodec-generated charmap tables;
    # this file is auto-generated, so hand edits would be overwritten.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # The charmap is stateless, so each chunk encodes independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # The charmap is stateless, so each chunk decodes independently.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the Codec/StreamWriter bases.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the Codec/StreamReader bases.
    pass
### encodings module API
def getregentry():
    # Registration hook used by the encodings package to obtain this
    # codec's entry points.
    return codecs.CodecInfo(
        name='cp1026',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Build the inverse (unicode character -> byte) map from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| mpl-2.0 |
quarckster/cfme_tests | cfme/fixtures/tccheck.py | 10 | 2859 | # -*- coding: utf-8 -*-
"""Plugin that does basic test case validation.
Use ``--validate-test-cases`` to enable it.
Currently does not work on ``--collect-only`` due to pytest's implementation bug.
Error output lines are prefixed by ``[TCV-E]``.
If no error nappens, a line prefixed with ``[TCV-OK]`` appears at the end of collection.
"""
def pytest_addoption(parser):
    """Register the ``--validate-test-cases`` command line flag.

    The flag lands in ``config.option.validate_tcs`` and switches the test
    case validation plugin on; it is off by default.
    """
    cfme_group = parser.getgroup('cfme')
    cfme_group.addoption(
        '--validate-test-cases',
        dest='validate_tcs',
        action='store_true',
        default=False,
        help="Enable test case validation")
def load_available_requirements():
    """Collect the names of all known requirement markers.

    Slightly hacky: walks every attribute of :mod:`cfme.test_requirements`
    and keeps only the ``MarkDecorator`` instances representing a
    ``requirement`` marker, returning the set of their first arguments.
    """
    from _pytest.mark import MarkDecorator
    from cfme import test_requirements

    known = set()
    for attr_name in dir(test_requirements):
        # Private helpers and the re-exported ``pytest`` module are not markers.
        if attr_name.startswith('_') or attr_name == 'pytest':
            continue
        candidate = getattr(test_requirements, attr_name)
        if isinstance(candidate, MarkDecorator) and candidate.name == 'requirement':
            known.add(candidate.args[0])
    return known
def check_tier(item):
    """Validate the ``tier`` marker of a single collected test item.

    Returns a list of ``[TCV-E]``-prefixed error strings; an empty list
    means the marker is present and carries a tier number between 1 and 3.
    """
    marker = item.get_marker('tier')
    if marker is None:
        return ['[TCV-E] MISSING TIER: {}'.format(item.nodeid)]
    try:
        tier_number = marker.args[0]
    except IndexError:
        # Marker present but no tier number supplied.
        return ['[TCV-E] BAD TIER SPECIFICATION: {}'.format(item.nodeid)]
    if not 1 <= tier_number <= 3:
        return ['[TCV-E] BAD TIER NUMBER ({}): {}'.format(
            tier_number, item.nodeid)]
    return []
def check_requirement(item, available_requirements):
    """Validate the ``requirement`` marker of a single collected test item.

    ``available_requirements`` is the set produced by
    ``load_available_requirements``; the marker's first argument must be a
    member of it. Returns a list of ``[TCV-E]`` error strings (empty when OK).
    """
    marker = item.get_marker('requirement')
    if marker is None:
        return ['[TCV-E] MISSING REQUIREMENT: {}'.format(item.nodeid)]
    try:
        requirement_name = marker.args[0]
    except IndexError:
        # Marker present but no requirement string supplied.
        return ['[TCV-E] BAD REQUIREMENT SPECIFICATION: {}'.format(item.nodeid)]
    if requirement_name not in available_requirements:
        return ['[TCV-E] BAD REQUIREMENT STRING ({}): {}'.format(
            requirement_name, item.nodeid)]
    return []
def pytest_report_collectionfinish(config, startdir, items):
    """Emit test case validation results after collection finishes.

    Does nothing unless ``--validate-test-cases`` was passed. Returns the
    lines pytest should print: one ``[TCV-E]`` line per problem found,
    followed by a summary line (``[TCV-OK]`` when everything validated).
    """
    if not config.option.validate_tcs:
        return
    known_requirements = load_available_requirements()
    report_lines = []
    for item in items:
        report_lines += check_tier(item)
        report_lines += check_requirement(item, known_requirements)
    summary = ('[TCV-OK] TEST CASES VALIDATED OK!' if not report_lines
               else '[TCV-E] SOME TEST CASES NEED REVIEWING!')
    report_lines.append(summary)
    return report_lines
| gpl-2.0 |
robertnishihara/ray | rllib/examples/multi_agent_independent_learning.py | 2 | 1072 | from ray import tune
from ray.tune.registry import register_env
from ray.rllib.env.pettingzoo_env import PettingZooEnv
from pettingzoo.sisl import waterworld_v0
# Based on code from github.com/parametersharingmadrl/parametersharingmadrl
if __name__ == "__main__":
    # RDQN - Rainbow DQN
    # ADQN - Apex DQN

    def env_creator(args):
        # Wrap the PettingZoo waterworld environment so RLlib can drive it
        # through its multi-agent env interface.
        return PettingZooEnv(waterworld_v0.env())

    # Instantiate once up front to read the spaces and the agent ids.
    env = env_creator({})
    register_env("waterworld", env_creator)

    obs_space = env.observation_space
    act_spc = env.action_space

    # One independent policy per agent (no parameter sharing).
    policies = {agent: (None, obs_space, act_spc, {}) for agent in env.agents}

    tune.run(
        "APEX_DDPG",
        stop={"episodes_total": 60000},
        checkpoint_freq=10,
        config={
            # Environment specific
            "env": "waterworld",
            # General
            "num_gpus": 1,
            "num_workers": 2,
            # Method specific
            "multiagent": {
                "policies": policies,
                # Each agent id maps to the policy of the same name.
                "policy_mapping_fn": (lambda agent_id: agent_id),
            },
        },
    )
| apache-2.0 |
henrytao-me/openerp.positionq | openerp/addons/procurement/wizard/mrp_procurement.py | 56 | 2022 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import threading
from openerp.osv import fields, osv
class procurement_compute(osv.osv_memory):
    """Transient wizard model that launches the procurement scheduler in a
    background thread so the client request returns immediately."""

    _name = 'procurement.order.compute'
    _description = 'Compute Procurement'

    def _procure_calculation_procure(self, cr, uid, ids, context=None):
        # Worker body executed in a separate thread: confirm all pending
        # procurement orders, letting the scheduler open a fresh cursor on
        # the same database.
        procurement_order = self.pool.get('procurement.order')
        procurement_order._procure_confirm(
            cr, uid, use_new_cursor=cr.dbname, context=context)
        return {}

    def procure_calculation(self, cr, uid, ids, context=None):
        """Start the procurement computation in the background.

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: list of record ids selected in the wizard
        :param context: standard context dictionary
        """
        worker = threading.Thread(
            target=self._procure_calculation_procure,
            args=(cr, uid, ids, context))
        worker.start()
        return {}


procurement_compute()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
russellb/nova | nova/tests/test_network_info.py | 9 | 14068 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import log as logging
from nova.network import model
from nova import test
from nova.tests import fake_network_cache_model
LOG = logging.getLogger(__name__)
class RouteTests(test.TestCase):
    """Tests for the network model ``Route`` object."""

    def test_create_route_with_attrs(self):
        """A freshly built route exposes its cidr, gateway and interface."""
        # Fix: removed an unused local (`ip = ...new_ip(...)`) that was never
        # referenced by any assertion.
        route = fake_network_cache_model.new_route()
        self.assertEqual(route['cidr'], '0.0.0.0/24')
        self.assertEqual(route['gateway']['address'], '192.168.1.1')
        self.assertEqual(route['interface'], 'eth0')

    def test_routes_equal(self):
        """Two routes built from identical defaults compare equal."""
        route1 = fake_network_cache_model.new_route()
        route2 = fake_network_cache_model.new_route()
        self.assertEqual(route1, route2)

    def test_routes_not_equal(self):
        """Routes with different cidrs compare unequal."""
        route1 = fake_network_cache_model.new_route()
        route2 = fake_network_cache_model.new_route(dict(cidr='1.1.1.1/24'))
        self.assertNotEqual(route1, route2)

    def test_hydrate(self):
        """Hydrating a partial dict fills the gateway and defaults the rest."""
        route = model.Route.hydrate(
            {'gateway': fake_network_cache_model.new_ip(
                dict(address='192.168.1.1'))})
        self.assertEqual(route['cidr'], None)
        self.assertEqual(route['gateway']['address'], '192.168.1.1')
        self.assertEqual(route['interface'], None)
class FixedIPTests(test.TestCase):
    """Tests for the network model ``FixedIP`` object."""

    def test_createnew_fixed_ip_with_attrs(self):
        # A v4 address is detected and floating IPs start empty.
        ip = model.FixedIP(address='192.168.1.100')
        self.assertEqual(ip['address'], '192.168.1.100')
        self.assertEqual(ip['floating_ips'], [])
        self.assertEqual(ip['type'], 'fixed')
        self.assertEqual(ip['version'], 4)

    def test_create_fixed_ipv6(self):
        # A v6 address is detected as version 6.
        ip = model.FixedIP(address='::1')
        self.assertEqual(ip['address'], '::1')
        self.assertEqual(ip['floating_ips'], [])
        self.assertEqual(ip['type'], 'fixed')
        self.assertEqual(ip['version'], 6)

    def test_create_fixed_bad_ip_fails(self):
        # A nonsense address must be rejected at construction time.
        self.assertRaises(exception.InvalidIpAddressError,
                          model.FixedIP,
                          address='picklespicklespickles')

    def test_equate_two_fixed_ips(self):
        self.assertEqual(model.FixedIP(address='::1'),
                         model.FixedIP(address='::1'))

    def test_equate_two_dissimilar_fixed_ips_fails(self):
        self.assertNotEqual(model.FixedIP(address='::1'),
                            model.FixedIP(address='::2'))

    def test_hydrate(self):
        # Hydrating an empty dict yields a fixed IP with all-default fields.
        ip = model.FixedIP.hydrate({})
        self.assertEqual(ip['floating_ips'], [])
        self.assertEqual(ip['address'], None)
        self.assertEqual(ip['type'], 'fixed')
        self.assertEqual(ip['version'], None)

    def test_add_floating_ip(self):
        ip = model.FixedIP(address='192.168.1.100')
        ip.add_floating_ip('192.168.1.101')
        self.assertEqual(ip['floating_ips'], ['192.168.1.101'])

    def test_add_floating_ip_repeatedly_only_one_instance(self):
        # Re-adding the same floating address must not create duplicates.
        ip = model.FixedIP(address='192.168.1.100')
        for _ in xrange(10):
            ip.add_floating_ip('192.168.1.101')
        self.assertEqual(ip['floating_ips'], ['192.168.1.101'])
class SubnetTests(test.TestCase):
    """Tests for the network model ``Subnet`` object."""

    def test_create_subnet_with_attrs(self):
        # The default fake subnet carries one route, two DNS servers and
        # two fixed IPs.
        subnet = fake_network_cache_model.new_subnet()
        route1 = fake_network_cache_model.new_route()

        self.assertEqual(subnet['cidr'], '10.10.0.0/24')
        self.assertEqual(subnet['dns'],
                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                 fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
        self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
        self.assertEqual(subnet['ips'],
                [fake_network_cache_model.new_ip(
                        dict(address='10.10.0.2')),
                 fake_network_cache_model.new_ip(
                        dict(address='10.10.0.3'))])
        self.assertEqual(subnet['routes'], [route1])
        self.assertEqual(subnet['version'], 4)

    def test_add_route(self):
        subnet = fake_network_cache_model.new_subnet()
        route1 = fake_network_cache_model.new_route()
        route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
        subnet.add_route(route2)
        self.assertEqual(subnet['routes'], [route1, route2])

    def test_add_route_a_lot(self):
        # Adding the same route repeatedly must not create duplicates.
        subnet = fake_network_cache_model.new_subnet()
        route1 = fake_network_cache_model.new_route()
        route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
        for i in xrange(10):
            subnet.add_route(route2)
        self.assertEqual(subnet['routes'], [route1, route2])

    def test_add_dns(self):
        subnet = fake_network_cache_model.new_subnet()
        dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
        subnet.add_dns(dns)
        self.assertEqual(subnet['dns'],
                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                 fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
                 fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])

    def test_add_dns_a_lot(self):
        # Adding the same DNS server repeatedly must not create duplicates.
        subnet = fake_network_cache_model.new_subnet()
        for i in xrange(10):
            subnet.add_dns(fake_network_cache_model.new_ip(
                    dict(address='9.9.9.9')))
        self.assertEqual(subnet['dns'],
                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                 fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
                 fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])

    def test_add_ip(self):
        subnet = fake_network_cache_model.new_subnet()
        subnet.add_ip(fake_network_cache_model.new_ip(
                dict(address='192.168.1.102')))
        self.assertEqual(subnet['ips'],
                [fake_network_cache_model.new_ip(
                        dict(address='10.10.0.2')),
                 fake_network_cache_model.new_ip(
                        dict(address='10.10.0.3')),
                 fake_network_cache_model.new_ip(
                        dict(address='192.168.1.102'))])

    def test_add_ip_a_lot(self):
        # Adding the same IP repeatedly must not create duplicates.
        subnet = fake_network_cache_model.new_subnet()
        for i in xrange(10):
            subnet.add_ip(fake_network_cache_model.new_ip(
                    dict(address='192.168.1.102')))
        self.assertEqual(subnet['ips'],
                [fake_network_cache_model.new_ip(
                        dict(address='10.10.0.2')),
                 fake_network_cache_model.new_ip(
                        dict(address='10.10.0.3')),
                 fake_network_cache_model.new_ip(
                        dict(address='192.168.1.102'))])

    def test_hydrate(self):
        # Round-trip: every field in the dict must survive hydration.
        subnet_dict = {
            'cidr': '255.255.255.0',
            'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
            'ips': [fake_network_cache_model.new_ip(dict(address='2.2.2.2'))],
            'routes': [fake_network_cache_model.new_route()],
            'version': 4,
            'gateway': fake_network_cache_model.new_ip(
                dict(address='3.3.3.3'))}
        subnet = model.Subnet.hydrate(subnet_dict)

        self.assertEqual(subnet['cidr'], '255.255.255.0')
        self.assertEqual(subnet['dns'], [fake_network_cache_model.new_ip(
                dict(address='1.1.1.1'))])
        self.assertEqual(subnet['gateway']['address'], '3.3.3.3')
        self.assertEqual(subnet['ips'], [fake_network_cache_model.new_ip(
                dict(address='2.2.2.2'))])
        self.assertEqual(subnet['routes'], [
                fake_network_cache_model.new_route()])
        self.assertEqual(subnet['version'], 4)
class NetworkTests(test.TestCase):
    """Tests for the network model ``Network`` object."""

    def test_create_network(self):
        network = fake_network_cache_model.new_network()
        self.assertEqual(network['id'], 1)
        self.assertEqual(network['bridge'], 'br0')
        self.assertEqual(network['label'], 'public')
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255'))])

    def test_add_subnet(self):
        network = fake_network_cache_model.new_network()
        network.add_subnet(fake_network_cache_model.new_subnet(
                dict(cidr='0.0.0.0')))
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255')),
                 fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])

    def test_add_subnet_a_lot(self):
        # Adding the same subnet repeatedly must not create duplicates.
        network = fake_network_cache_model.new_network()
        for i in xrange(10):
            network.add_subnet(fake_network_cache_model.new_subnet(
                    dict(cidr='0.0.0.0')))
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255')),
                 fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])

    def test_hydrate(self):
        """Hydrating a serialized network reproduces all attributes."""
        # Fix: removed the dead `new_network` dict literal that was built
        # but never passed to hydrate() nor referenced afterwards.
        network = model.Network.hydrate(fake_network_cache_model.new_network())

        self.assertEqual(network['id'], 1)
        self.assertEqual(network['bridge'], 'br0')
        self.assertEqual(network['label'], 'public')
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255'))])
class VIFTests(test.TestCase):
    """Tests for the network model ``VIF`` (virtual interface) object."""

    def test_create_vif(self):
        vif = fake_network_cache_model.new_vif()
        self.assertEqual(vif['id'], 1)
        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
        self.assertEqual(vif['network'],
                fake_network_cache_model.new_network())

    def test_vif_get_fixed_ips(self):
        vif = fake_network_cache_model.new_vif()
        fixed_ips = vif.fixed_ips()
        # Two IPs per subnet, two subnets -> the pair appears twice.
        ips = [fake_network_cache_model.new_ip(dict(address='10.10.0.2')),
               fake_network_cache_model.new_ip(
                    dict(address='10.10.0.3'))] * 2
        self.assertEqual(fixed_ips, ips)

    def test_vif_get_floating_ips(self):
        vif = fake_network_cache_model.new_vif()
        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
        floating_ips = vif.floating_ips()
        self.assertEqual(floating_ips, ['192.168.1.1'])

    def test_vif_get_labeled_ips(self):
        vif = fake_network_cache_model.new_vif()
        labeled_ips = vif.labeled_ips()
        ip_dict = {
            'network_id': 1,
            'ips': [fake_network_cache_model.new_ip(
                        {'address': '10.10.0.2'}),
                    fake_network_cache_model.new_ip(
                        {'address': '10.10.0.3'})] * 2,
            'network_label': 'public'}
        self.assertEqual(labeled_ips, ip_dict)

    def test_hydrate(self):
        """Hydrating a serialized VIF reproduces all attributes."""
        # Fix: removed the dead `new_vif` dict literal; it was never passed
        # to hydrate() and even carried a different address ('127.0.0.1')
        # than the values asserted below.
        vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
        self.assertEqual(vif['id'], 1)
        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
        self.assertEqual(vif['network'],
                fake_network_cache_model.new_network())
class NetworkInfoTests(test.TestCase):
    """Tests for the network model ``NetworkInfo`` container."""

    def test_create_model(self):
        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
                fake_network_cache_model.new_vif(
                    {'address': 'bb:bb:bb:bb:bb:bb'})])
        # Two VIFs, each contributing two subnets of two IPs -> pair * 4.
        self.assertEqual(ninfo.fixed_ips(),
                [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
                 fake_network_cache_model.new_ip(
                    {'address': '10.10.0.3'})] * 4)

    def test_get_floating_ips(self):
        vif = fake_network_cache_model.new_vif()
        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
        ninfo = model.NetworkInfo([vif,
                fake_network_cache_model.new_vif(
                    {'address': 'bb:bb:bb:bb:bb:bb'})])
        self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])

    def test_hydrate(self):
        """Hydration must preserve every fixed IP of the original."""
        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
                fake_network_cache_model.new_vif(
                    {'address': 'bb:bb:bb:bb:bb:bb'})])
        deserialized = model.NetworkInfo.hydrate(ninfo)
        # Fix: assert against the hydrated copy, not the original `ninfo`;
        # previously `deserialized` was unused, so hydrate() was never
        # actually verified.
        self.assertEqual(deserialized.fixed_ips(),
                [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
                 fake_network_cache_model.new_ip(
                    {'address': '10.10.0.3'})] * 4)
| apache-2.0 |
alexschiller/osf.io | website/addons/mendeley/tests/utils.py | 4 | 16238 | # -*- coding: utf-8 -*-
from framework.mongo import storage, set_up_storage
from website.addons.base.testing import OAuthAddonTestCaseMixin, AddonTestCase
from website.addons.base.testing.utils import MockFolder
from website.addons.mendeley.tests.factories import MendeleyAccountFactory
from website.addons.mendeley.model import Mendeley
from website.addons.mendeley import MODELS
from json import dumps
class MendeleyTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
    """Base test case for the Mendeley citation addon.

    Wires the generic OAuth addon test machinery to the Mendeley-specific
    account factory, provider class and addon short name.
    """
    ADDON_SHORT_NAME = 'mendeley'
    ExternalAccountFactory = MendeleyAccountFactory
    Provider = Mendeley

    def set_node_settings(self, settings):
        """Apply base defaults, then point the node settings at the mock
        folder and the test external account."""
        super(MendeleyTestCase, self).set_node_settings(settings)
        settings.list_id = MockFolder().json['id']
        settings.external_account = self.external_account
        settings.save()
def init_storage():
    # Back all Mendeley addon models with MongoDB storage for the test run.
    set_up_storage(MODELS, storage_class=storage.MongoStorage)
mock_responses = {
'folders': [
{
"id": "4901a8f5-9840-49bf-8a17-bdb3d5900417",
"name": "subfolder",
"created": "2015-02-13T20:34:42.000Z",
"modified": "2015-02-13T20:34:44.000Z"
},
{
"id": "a6b12ebf-bd07-4f4e-ad73-f9704555f032",
"name": "subfolder2",
"created": "2015-02-13T20:34:42.000Z",
"modified": "2015-02-13T20:34:44.000Z",
"parent_id": "4901a8f5-9840-49bf-8a17-bdb3d5900417"
},
{
"id": "e843da05-8818-47c2-8c37-41eebfc4fe3f",
"name": "subfolder3",
"created": "2015-02-17T15:27:13.000Z",
"modified": "2015-02-17T15:27:13.000Z",
"parent_id": "a6b12ebf-bd07-4f4e-ad73-f9704555f032"
}
],
'documents': [
{
"id": "547a1215-efdb-36d2-93b2-e3ef8991264f",
"title": "Cloud Computing",
"type": "journal",
"authors": [
{
"first_name": "Shivaji P",
"last_name": "Mirashe"
},
{
"first_name": "N V",
"last_name": "Kalyankar"
}
],
"year": 2010,
"source": "Communications of the ACM",
"identifiers": {
"issn": "03621340",
"doi": "10.1145/358438.349303",
"pmid": "22988693",
"arxiv": "1003.4074",
"isbn": "1-58113-199-2"
},
"created": "2015-02-13T18:17:47.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:44.000Z",
"abstract": "Computing as you know it is about to change, your applications and documents are going to move from the desktop into the cloud. I'm talking about cloud computing, where applications and files are hosted on a \"cloud\" consisting of thousands of computers and servers, all linked together and accessible via the Internet. With cloud computing, everything you do is now web based instead of being desktop based. You can access all your programs and documents from any computer that's connected to the Internet. How will cloud computing change the way you work? For one thing, you're no longer tied to a single computer. You can take your work anywhere because it's always accessible via the web. In addition, cloud computing facilitates group collaboration, as all group members can access the same programs and documents from wherever they happen to be located. Cloud computing might sound far-fetched, but chances are you're already using some cloud applications. If you're using a web-based email program, such as Gmail or Hotmail, you're computing in the cloud. If you're using a web-based application such as Google Calendar or Apple Mobile Me, you're computing in the cloud. If you're using a file- or photo-sharing site, such as Flickr or Picasa Web Albums, you're computing in the cloud. It's the technology of the future, available to use today."
},
{
"id": "5e95a1a9-d789-3576-9943-35eee8e59ea9",
"title": "The Google file system",
"type": "generic",
"authors": [
{
"first_name": "Sanjay",
"last_name": "Ghemawat"
},
{
"first_name": "Howard",
"last_name": "Gobioff"
},
{
"first_name": "Shun-Tak",
"last_name": "Leung"
}
],
"year": 2003,
"source": "ACM SIGOPS Operating Systems Review",
"identifiers": {
"pmid": "191",
"issn": "01635980"
},
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:44.000Z",
"abstract": "We have designed and implemented the Google File System, a scalable distributed file system for large distributed data-intensive applications. It provides fault tolerance while running on inexpensive commodity hardware, and it delivers high aggregate performance to a large number of clients. While sharing many of the same goals as previous distributed file systems, our design has been driven by observations of our application workloads and technological environment, both current and anticipated, that reflect a marked departure from some earlier file system assumptions. This has led us to reexamine traditional choices and explore radically different design points. The file system has successfully met our storage needs. It is widely deployed within Google as the storage platform for the generation and processing of data used by our service as well as research and development efforts that require large data sets. The largest cluster to date provides hundreds of terabytes of storage across thousands of disks on over a thousand machines, and it is concurrently accessed by hundreds of clients. In this paper, we present file system interface extensions designed to support distributed applications, discuss many aspects of our design, and report measurements from both micro-benchmarks and real world use."
},
{
"id": "3480056e-fe4d-339b-afed-4312d03739a4",
"title": "Above the clouds: A Berkeley view of cloud computing",
"type": "journal",
"authors": [
{
"first_name": "M",
"last_name": "Armbrust"
},
{
"first_name": "A",
"last_name": "Fox"
},
{
"first_name": "R",
"last_name": "Griffith"
},
{
"first_name": "AD",
"last_name": "Joseph"
},
{
"last_name": "RH"
}
],
"year": 2009,
"source": " University of California, Berkeley, Tech. Rep. UCB ",
"identifiers": {
"pmid": "11242594",
"arxiv": "0521865719 9780521865715"
},
"keywords": [
"cloud computing",
"distributed system economics",
"internet datacenters",
"utility computing"
],
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:45.000Z",
"abstract": "Cloud Computing, the long-held dream of computing as a utility, has the potential to transform a large part of the IT industry, making software even more attractive as a service and shaping the way IT hardware is designed and purchased. Developers with innovative ideas for new Internet services no longer require the large capital outlays in hardware to deploy their service or the human expense to operate it. They need not be concerned about over- provisioning for a service whose popularity does not meet their predictions, thus wasting costly resources, or under- provisioning for one that becomes wildly popular, thus missing potential customers and revenue. Moreover, companies with large batch-oriented tasks can get results as quickly as their programs can scale, since using 1000 servers for one hour costs no more than using one server for 1000 hlarge scale, is unprecedented in the history of IT. "
},
{
"id": "e917dd51-8b94-3748-810b-cafa2accc18a",
"title": "Toward the next generation of recommender systems: A survey of the state-of-the-art and possible extensions",
"type": "generic",
"authors": [
{
"first_name": "Gediminas",
"last_name": "Adomavicius"
},
{
"first_name": "Alexander",
"last_name": "Tuzhilin"
}
],
"year": 2005,
"source": "IEEE Transactions on Knowledge and Data Engineering",
"identifiers": {
"issn": "10414347",
"pmid": "1423975",
"arxiv": "3"
},
"keywords": [
"Collaborative filtering",
"Extensions to recommander systems",
"Rating estimation methods",
"Recommander systems"
],
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:45.000Z",
"abstract": " This paper presents an overview of the field of recommender systems and describes the current generation of recommendation methods that are usually classified into the following three main categories: content-based, collaborative, and hybrid recommendation approaches. This paper also describes various limitations of current recommendation methods and discusses possible extensions that can improve recommendation capabilities and make recommender systems applicable to an even broader range of applications. These extensions include, among others, an improvement of understanding of users and items, incorporation of the contextual information into the recommendation process, support for multicriteria ratings, and a provision of more flexible and less intrusive types of recommendations."
},
{
"id": "8cd60008-888a-3212-966f-29d481b4b7b7",
"title": "An Introduction to Information Retrieval",
"type": "patent",
"authors": [
{
"first_name": "Christopher D.",
"last_name": "Manning"
},
{
"first_name": "Prabhakar",
"last_name": "Raghavan"
}
],
"year": 2009,
"source": "Online",
"identifiers": {
"issn": "13864564",
"doi": "10.1109/LPT.2009.2020494",
"pmid": "10575050",
"arxiv": "0521865719 9780521865715",
"isbn": "0521865719"
},
"keywords": [
"keyword"
],
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-17T15:27:14.000Z",
"abstract": "Class-tested and coherent, this groundbreaking new textbook teaches web-era information retrieval, including web search and the related areas of text classification and text clustering from basic concepts. Written from a computer science perspective by three leading experts in the field, it gives an up-to-date treatment of all aspects of the design and implementation of systems for gathering, indexing, and searching documents; methods for evaluating systems; and an introduction to the use of machine learning methods on text collections. All the important ideas are explained using examples and figures, making it perfect for introductory courses in information retrieval for advanced undergraduates and graduate students in computer science. Based on feedback from extensive classroom experience, the book has been carefully structured in order to make teaching more natural and effective. Although originally designed as the primary text for a graduate or advanced undergraduate course in information retrieval, the book will also create a buzz for researchers and professionals alike."
},
{
"id": "de25a64f-493b-330e-a48c-4089bab938f5",
"title": "Learning of ontologies for the web: The analysis of existent approaches",
"type": "journal",
"authors": [
{
"first_name": "Borys",
"last_name": "Omelayenko"
}
],
"year": 2001,
"source": "CEUR Workshop Proceedings",
"identifiers": {
"issn": "16130073"
},
"created": "2015-02-13T18:17:52.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:43.000Z",
"abstract": "The next generation of the Web, called Semantic Web, has to improve\\nthe Web with semantic (ontological) page annotations to enable knowledge-level\\nquerying and searches. Manual construction of these ontologies will\\nrequire tremendous efforts that force future integration of machine\\nlearning with knowledge acquisition to enable highly automated ontology\\nlearning. In the paper we present the state of the-art in the field\\nof ontology learning from the Web to see how it can contribute to\\nthe task..."
}
]
}
# Store each canned response JSON-encoded, as the Mendeley API client would
# receive it over the wire (py2 dict iteration via iteritems).
mock_responses = {k:dumps(v) for k,v in mock_responses.iteritems()}
| apache-2.0 |
salamer/django | tests/admin_ordering/tests.py | 279 | 6702 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth.models import User
from django.test import RequestFactory, TestCase
from .models import (
Band, DynOrderingBandAdmin, Song, SongInlineDefaultOrdering,
SongInlineNewOrdering,
)
class MockRequest(object):
    """Bare stand-in for an HttpRequest passed to ModelAdmin methods."""
    pass
class MockSuperUser(object):
    """Fake user that grants every permission, mimicking a superuser."""
    def has_perm(self, perm):
        # Always claim the individual permission is granted.
        return True
    def has_module_perms(self, module):
        # Always claim module-level permissions are granted.
        return True
# Shared request and admin-site fixtures used by the test cases below.
request = MockRequest()
request.user = MockSuperUser()
site = admin.AdminSite()
class TestAdminOrdering(TestCase):
    """
    Let's make sure that ModelAdmin.get_queryset uses the ordering we define
    in ModelAdmin rather that ordering defined in the model's inner Meta
    class.
    """
    def setUp(self):
        # Rank order deliberately differs from alphabetical (name) order so
        # the two orderings produce distinguishable results.
        self.request_factory = RequestFactory()
        Band.objects.bulk_create([
            Band(name='Aerosmith', bio='', rank=3),
            Band(name='Radiohead', bio='', rank=1),
            Band(name='Van Halen', bio='', rank=2),
        ])
    def test_default_ordering(self):
        """
        The default ordering should be by name, as specified in the inner Meta
        class.
        """
        ma = ModelAdmin(Band, site)
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
    def test_specified_ordering(self):
        """
        Let's use a custom ModelAdmin that changes the ordering, and make sure
        it actually changes.
        """
        class BandAdmin(ModelAdmin):
            ordering = ('rank',) # default ordering is ('name',)
        ma = BandAdmin(Band, site)
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
    def test_dynamic_ordering(self):
        """
        Let's use a custom ModelAdmin that changes the ordering dynamically.
        """
        # Per the assertions below, superusers get rank ordering and other
        # users the default name ordering (behaviour presumably implemented
        # by DynOrderingBandAdmin in .models -- confirm there).
        super_user = User.objects.create(username='admin', is_superuser=True)
        other_user = User.objects.create(username='other')
        request = self.request_factory.get('/')
        request.user = super_user
        ma = DynOrderingBandAdmin(Band, site)
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
        request.user = other_user
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
class TestInlineModelAdminOrdering(TestCase):
    """
    Let's make sure that InlineModelAdmin.get_queryset uses the ordering we
    define in InlineModelAdmin.
    """
    def setUp(self):
        # Insertion order differs from alphabetical order so the two
        # orderings below are distinguishable.
        self.band = Band.objects.create(name='Aerosmith', bio='', rank=3)
        Song.objects.bulk_create([
            Song(band=self.band, name='Pink', duration=235),
            Song(band=self.band, name='Dude (Looks Like a Lady)', duration=264),
            Song(band=self.band, name='Jaded', duration=214),
        ])
    def test_default_ordering(self):
        """
        The default ordering should be by name, as specified in the inner Meta
        class.
        """
        inline = SongInlineDefaultOrdering(self.band, site)
        names = [s.name for s in inline.get_queryset(request)]
        self.assertListEqual(['Dude (Looks Like a Lady)', 'Jaded', 'Pink'], names)
    def test_specified_ordering(self):
        """
        Let's check with ordering set to something different than the default.
        """
        inline = SongInlineNewOrdering(self.band, site)
        names = [s.name for s in inline.get_queryset(request)]
        self.assertListEqual(['Jaded', 'Pink', 'Dude (Looks Like a Lady)'], names)
class TestRelatedFieldsAdminOrdering(TestCase):
    """
    Choices for related fields (FK / M2M) rendered in the admin should honour
    the related model's registered ModelAdmin ordering, falling back to the
    model's Meta ordering when no admin ordering is defined.
    """
    def setUp(self):
        # Rank order (b1, b2) is the reverse of name order (b2, b1).
        self.b1 = Band.objects.create(name='Pink Floyd', bio='', rank=1)
        self.b2 = Band.objects.create(name='Foo Fighters', bio='', rank=5)
        # we need to register a custom ModelAdmin (instead of just using
        # ModelAdmin) because the field creator tries to find the ModelAdmin
        # for the related model
        class SongAdmin(admin.ModelAdmin):
            pass
        site.register(Song, SongAdmin)
    def tearDown(self):
        # Unregister everything this test registered so the module-level
        # ``site`` stays clean for the other test cases sharing it.
        site.unregister(Song)
        if Band in site._registry:
            site.unregister(Band)
    def check_ordering_of_field_choices(self, correct_ordering):
        """Assert both the FK and the M2M form fields list bands in the given order."""
        fk_field = site._registry[Song].formfield_for_foreignkey(Song.band.field)
        m2m_field = site._registry[Song].formfield_for_manytomany(Song.other_interpreters.field)
        self.assertListEqual(list(fk_field.queryset), correct_ordering)
        self.assertListEqual(list(m2m_field.queryset), correct_ordering)
    def test_no_admin_fallback_to_model_ordering(self):
        # should be ordered by name (as defined by the model)
        self.check_ordering_of_field_choices([self.b2, self.b1])
    def test_admin_with_no_ordering_fallback_to_model_ordering(self):
        class NoOrderingBandAdmin(admin.ModelAdmin):
            pass
        site.register(Band, NoOrderingBandAdmin)
        # should be ordered by name (as defined by the model)
        self.check_ordering_of_field_choices([self.b2, self.b1])
    def test_admin_ordering_beats_model_ordering(self):
        class StaticOrderingBandAdmin(admin.ModelAdmin):
            ordering = ('rank',)
        site.register(Band, StaticOrderingBandAdmin)
        # should be ordered by rank (defined by the ModelAdmin)
        self.check_ordering_of_field_choices([self.b1, self.b2])
    def test_custom_queryset_still_wins(self):
        """Test that custom queryset has still precedence (#21405)"""
        class SongAdmin(admin.ModelAdmin):
            # Exclude one of the two Bands from the querysets
            def formfield_for_foreignkey(self, db_field, **kwargs):
                if db_field.name == 'band':
                    kwargs["queryset"] = Band.objects.filter(rank__gt=2)
                return super(SongAdmin, self).formfield_for_foreignkey(db_field, **kwargs)
            def formfield_for_manytomany(self, db_field, **kwargs):
                if db_field.name == 'other_interpreters':
                    kwargs["queryset"] = Band.objects.filter(rank__gt=2)
                # BUG FIX: this previously chained to
                # ``formfield_for_foreignkey``; the M2M override must delegate
                # to the M2M implementation of the parent class.
                return super(SongAdmin, self).formfield_for_manytomany(db_field, **kwargs)
        class StaticOrderingBandAdmin(admin.ModelAdmin):
            ordering = ('rank',)
        site.unregister(Song)
        site.register(Song, SongAdmin)
        site.register(Band, StaticOrderingBandAdmin)
        self.check_ordering_of_field_choices([self.b2])
| bsd-3-clause |
django-ses/django-ses | django_ses/management/commands/get_ses_statistics.py | 1 | 2230 | #!/usr/bin/env python
from collections import defaultdict
import boto3
from django.core.management.base import BaseCommand
from django_ses import settings
from django_ses.models import SESStat
from django_ses.views import stats_to_list
def stat_factory():
    """Return a fresh, zeroed per-day SES counter record.

    Used as the ``default_factory`` of the aggregation ``defaultdict`` in
    the management command below.
    """
    return dict.fromkeys(
        ('delivery_attempts', 'bounces', 'complaints', 'rejects'), 0)
class Command(BaseCommand):
    """
    Get SES sending statistic and store the result, grouped by date.

    Fetches send statistics from AWS SES, sums the raw datapoints into one
    bucket per calendar date, and upserts a ``SESStat`` row for each date.
    """
    def handle(self, *args, **options):
        # Credentials/region/endpoint all come from django_ses settings.
        connection = boto3.client(
            'ses',
            aws_access_key_id=settings.ACCESS_KEY,
            aws_secret_access_key=settings.SECRET_KEY,
            region_name=settings.AWS_SES_REGION_NAME,
            endpoint_url=settings.AWS_SES_REGION_ENDPOINT_URL,
        )
        stats = connection.get_send_statistics()
        data_points = stats_to_list(stats, localize=False)
        # date -> zeroed counter dict (see stat_factory above).
        stats_dict = defaultdict(stat_factory)
        for data in data_points:
            attempts = int(data['DeliveryAttempts'])
            bounces = int(data['Bounces'])
            complaints = int(data['Complaints'])
            rejects = int(data['Rejects'])
            date = data['Timestamp'].date()
            stats_dict[date]['delivery_attempts'] += attempts
            stats_dict[date]['bounces'] += bounces
            stats_dict[date]['complaints'] += complaints
            stats_dict[date]['rejects'] += rejects
        for k, v in stats_dict.items():
            stat, created = SESStat.objects.get_or_create(
                date=k,
                defaults={
                    'delivery_attempts': v['delivery_attempts'],
                    'bounces': v['bounces'],
                    'complaints': v['complaints'],
                    'rejects': v['rejects'],
                })
            # If statistic is not new, modify data if values are different
            # NOTE(review): only delivery_attempts is compared -- a change in
            # bounces/complaints/rejects alone will not trigger an update;
            # confirm whether that is intentional.
            if not created and stat.delivery_attempts != v['delivery_attempts']:
                stat.delivery_attempts = v['delivery_attempts']
                stat.bounces = v['bounces']
                stat.complaints = v['complaints']
                stat.rejects = v['rejects']
                stat.save()
| mit |
apanju/odoo | addons/calendar/contacts.py | 389 | 1414 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class calendar_contacts(osv.osv):
    """Link table associating a res.users record with a res.partner record."""
    _name = 'calendar.contacts'
    _columns = {
        # Owner of the record; defaults to the creating user (see _defaults).
        'user_id': fields.many2one('res.users','Me'),
        # Tracked partner; mandatory.
        'partner_id': fields.many2one('res.partner','Employee',required=True, domain=[]),
        'active':fields.boolean('active'),
    }
    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'active' : True,
    }
SphinxKnight/kuma | kuma/wiki/tests/test_views_code.py | 1 | 6585 | import pytest
from django.conf import settings
from waffle.testutils import override_switch
from kuma.attachments.models import Attachment
from kuma.attachments.tests import make_test_file
from kuma.core.urlresolvers import reverse
from . import normalize_html
from ..models import Revision
@pytest.fixture
def code_sample_doc(root_doc, wiki_user):
    """Return root_doc with a new current revision embedding live sample 'sample1'."""
    sample_page = """
    <p>This is a page. Deal with it.</p>
    <div id="sample1" class="code-sample">
        <pre class="brush: html">Some HTML</pre>
        <pre class="brush: css">.some-css { color: red; }</pre>
        <pre class="brush: js">window.alert("HI THERE")</pre>
    </div>
    <p>test</p>
    {{ EmbedLiveSample('sample1') }}
    """
    root_doc.current_revision = Revision.objects.create(
        document=root_doc, creator=wiki_user, content=sample_page)
    root_doc.save()
    return root_doc
@pytest.mark.parametrize('domain', ('HOST', 'ORIGIN'))
def test_code_sample(code_sample_doc, client, settings, domain):
    """The raw source for a document can be requested."""
    url = reverse('wiki.code_sample',
                  args=[code_sample_doc.slug, 'sample1'])
    # Allow 'testserver' via either ATTACHMENT_HOST or ATTACHMENT_ORIGIN,
    # depending on the parametrized ``domain``.
    setattr(settings, 'ATTACHMENT_' + domain, 'testserver')
    with override_switch('application_ACAO', True):
        response = client.get(
            url,
            HTTP_HOST='testserver',
            HTTP_IF_NONE_MATCH='"some-old-etag"'
        )
    assert response.status_code == 200
    assert response['Access-Control-Allow-Origin'] == '*'
    assert 'Last-Modified' not in response
    assert 'ETag' in response
    assert 'public' in response['Cache-Control']
    assert 'max-age=86400' in response['Cache-Control']
    assert response.content.startswith(b'<!DOCTYPE html>')
    # The rendered sample combines the CSS, HTML and JS blocks of the fixture.
    normalized = normalize_html(response.content)
    expected = (
        '<meta charset="utf-8">'
        '<link href="%sbuild/styles/samples.css"'
        ' rel="stylesheet" type="text/css">'
        '<style type="text/css">.some-css { color: red; }</style>'
        '<title>Root Document - sample1 - code sample</title>'
        'Some HTML'
        '<script>window.alert("HI THERE")</script>'
        % settings.STATIC_URL)
    assert normalized == expected
def test_code_sample_host_not_allowed(code_sample_doc, settings, client):
    """Requesting a code sample from a disallowed host is forbidden (403)."""
    disallowed_host = 'testserver'
    # Sanity-check the fixture: the host under test must not be one of the
    # domains configured for serving attachments/samples.
    assert disallowed_host != settings.ATTACHMENT_HOST
    assert disallowed_host != settings.ATTACHMENT_ORIGIN
    sample_url = reverse(
        'wiki.code_sample', args=[code_sample_doc.slug, 'sample1'])
    assert client.get(sample_url, HTTP_HOST=disallowed_host).status_code == 403
def test_code_sample_host_allowed(code_sample_doc, settings, client):
    """Users are allowed to view samples on an allowed domain."""
    host = 'sampleserver'
    url = reverse('wiki.code_sample',
                  args=[code_sample_doc.slug, 'sample1'])
    settings.ATTACHMENT_HOST = host
    settings.ALLOWED_HOSTS.append(host)
    response = client.get(url, HTTP_HOST=host)
    assert response.status_code == 200
    # Samples are publicly cacheable for one day.
    assert 'public' in response['Cache-Control']
    assert 'max-age=86400' in response['Cache-Control']
# The pytest-django urls marker also resets urlconf caches after the test
@pytest.mark.urls(settings.ROOT_URLCONF)
def test_code_sample_host_restricted_host(code_sample_doc, constance_config,
                                          settings, client):
    """Users are allowed to view samples on the attachment domain."""
    url = reverse('wiki.code_sample',
                  args=[code_sample_doc.slug, 'sample1'])
    host = 'sampleserver'
    settings.ALLOWED_HOSTS.append(host)
    settings.ATTACHMENT_HOST = host
    # Turn on host-based restrictions so the restricted urlconf is exercised.
    settings.ENABLE_RESTRICTIONS_BY_HOST = True
    # Setting the KUMASCRIPT_TIMEOUT to a non-zero value forces kumascript
    # rendering so we ensure that path is tested for these requests that use
    # a restricted urlconf environment.
    constance_config.KUMASCRIPT_TIMEOUT = 1
    response = client.get(url, HTTP_HOST=host)
    assert response.status_code == 200
    assert 'public' in response['Cache-Control']
    assert 'max-age=86400' in response['Cache-Control']
def test_raw_code_sample_file(code_sample_doc, constance_config,
                              wiki_user, admin_client, settings):
    """A relative file reference inside a sample redirects to the attachment URL."""
    # Upload an attachment
    upload_url = reverse('attachments.edit_attachment',
                         kwargs={'document_path': code_sample_doc.slug})
    file_for_upload = make_test_file(content='Something something unique')
    post_data = {
        'title': 'An uploaded file',
        'description': 'A unique experience for your file serving needs.',
        'comment': 'Yadda yadda yadda',
        'file': file_for_upload,
    }
    constance_config.WIKI_ATTACHMENT_ALLOWED_TYPES = 'text/plain'
    response = admin_client.post(upload_url, data=post_data)
    assert response.status_code == 302
    edit_url = reverse('wiki.edit', args=(code_sample_doc.slug,))
    assert response.url == edit_url
    # Add a relative reference to the sample content
    attachment = Attachment.objects.get(title='An uploaded file')
    filename = attachment.current_revision.filename
    url_css = 'url("files/%(attachment_id)s/%(filename)s")' % {
        'attachment_id': attachment.id,
        'filename': filename,
    }
    new_content = code_sample_doc.current_revision.content.replace(
        'color: red', url_css)
    code_sample_doc.current_revision = Revision.objects.create(
        document=code_sample_doc, creator=wiki_user, content=new_content)
    code_sample_doc.save()
    # URL is in the sample
    sample_url = reverse('wiki.code_sample',
                         args=[code_sample_doc.slug, 'sample1'])
    settings.ATTACHMENT_HOST = 'testserver'
    response = admin_client.get(sample_url)
    assert response.status_code == 200
    assert url_css.encode('utf-8') in response.content
    assert 'public' in response['Cache-Control']
    assert 'max-age=86400' in response['Cache-Control']
    # Getting the URL redirects to the attachment
    file_url = reverse('wiki.raw_code_sample_file',
                       args=(code_sample_doc.slug, 'sample1', attachment.id,
                             filename))
    response = admin_client.get(file_url)
    assert response.status_code == 302
    assert response.url == attachment.get_file_url()
    # The redirect is cacheable for five days and carries no Vary header.
    assert not response.has_header('Vary')
    assert 'Cache-Control' in response
    assert 'public' in response['Cache-Control']
    assert 'max-age=432000' in response['Cache-Control']
| mpl-2.0 |
maxspencer/pdfrefiner | refiner/input/pdftohtml.py | 1 | 1508 | import subprocess
import re
import tempfile
import bs4
import sys
from refiner.input.model import InputDocument, InputPage, Font, Text
def parse(string, replacements=[]):
    """
    Parse ``pdftohtml -xml`` output into an InputDocument.

    :param string: raw XML produced by ``pdftohtml -xml``.
    :param replacements: optional iterable of ``(pattern, repl)`` pairs
        applied as regex substitutions to the raw string before parsing.

    NOTE(review): the mutable default ``replacements=[]`` is harmless only
    because it is never mutated here; a tuple default would be safer.
    """
    for r in replacements:
        string = re.sub(r[0], r[1], string)
    # NOTE(review): no explicit parser is given to BeautifulSoup, so the
    # "best available" installed parser is used; results may vary between
    # environments.
    soup = bs4.BeautifulSoup(string)
    document = InputDocument()
    # <fontspec> elements describe the fonts referenced by <text> elements.
    fontspec_elements = soup.find_all('fontspec')
    for e in fontspec_elements:
        font = Font(e['id'], e['family'], e['size'], e['color'])
        document.fonts[font.id] = font
    page_elements = soup.find_all('page')
    for e in page_elements:
        page = InputPage(int(e['number']), int(e['width']), int(e['height']))
        document.pages.append(page)
        for te in e.find_all('text'):
            # Flatten nested markup (e.g. <b>/<i>) into one plain string.
            string = ''.join(te.strings)
            text = Text(
                string,
                page,
                int(te['left']),
                int(te['top']),
                int(te['width']),
                int(te['height']),
                # Unknown font ids simply yield font=None.
                font=document.fonts.get(te['font'], None)
            )
            page.texts.append(text)
    return document
def parse_file(path):
    """Convert the PDF at *path* to XML via ``pdftohtml`` and parse it."""
    with tempfile.NamedTemporaryFile(mode='w+', suffix='.xml') as xml_file:
        # pdftohtml writes its XML into the temp file by name; we read it
        # back through the still-open handle before it is deleted.
        args = ['pdftohtml', '-xml', path, xml_file.name]
        subprocess.check_call(args)
        xml = xml_file.read()
    return parse(xml)
if __name__ == '__main__':
    # Ad-hoc smoke test: parse the pdftohtml XML file named on the command
    # line and dump the text boxes found on page 4 (index 3).
    with open(sys.argv[1], 'r') as f:
        d = parse(f.read())
    for t in d.pages[3].texts:
        print((str(t.box), str(t)))
| mit |
Ruide/angr-dev | cle/cle/backends/elf/regions.py | 1 | 1883 | from ..regions import Segment, Section
class ELFSegment(Segment):
    """
    A program segment of an ELF binary, wrapping a pyelftools segment
    header. Permission properties decode the ``p_flags`` bits.
    """
    def __init__(self, readelf_seg):
        hdr = readelf_seg.header
        self.flags = hdr.p_flags
        super(ELFSegment, self).__init__(
            hdr.p_offset, hdr.p_vaddr, hdr.p_filesz, hdr.p_memsz)

    @property
    def is_readable(self):
        # PF_R bit of p_flags
        return bool(self.flags & 4)

    @property
    def is_writable(self):
        # PF_W bit of p_flags
        return bool(self.flags & 2)

    @property
    def is_executable(self):
        # PF_X bit of p_flags
        return bool(self.flags & 1)
class ELFSection(Section):
    """
    A section of an ELF binary, wrapping a pyelftools section header.
    Boolean properties decode the ``sh_flags`` bits.
    """
    # sh_flags bits of interest
    SHF_WRITE = 0x1
    SHF_ALLOC = 0x2
    SHF_EXECINSTR = 0x4
    SHF_STRINGS = 0x20

    def __init__(self, readelf_sec, remap_offset=0):
        hdr = readelf_sec.header
        super(ELFSection, self).__init__(
            readelf_sec.name,
            hdr.sh_offset,
            hdr.sh_addr + remap_offset,
            hdr.sh_size
        )
        self.type = hdr.sh_type
        self.entsize = hdr.sh_entsize
        self.flags = hdr.sh_flags
        self.link = hdr.sh_link
        self.info = hdr.sh_info
        self.align = hdr.sh_addralign
        self.remap_offset = remap_offset

    def _test_flag(self, mask):
        """Return True if *mask* is set in this section's sh_flags."""
        return self.flags & mask != 0

    @property
    def is_readable(self):
        # Sections are always considered readable.
        return True

    @property
    def is_writable(self):
        return self._test_flag(self.SHF_WRITE)

    @property
    def occupies_memory(self):
        return self._test_flag(self.SHF_ALLOC)

    @property
    def is_executable(self):
        return self._test_flag(self.SHF_EXECINSTR)

    @property
    def is_strings(self):
        return self._test_flag(self.SHF_STRINGS)
GeoNode/geonode | geonode/people/signals.py | 3 | 5613 | #########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""Signal handlers pertaining to the people app
Some of these signals deal with authentication related workflows.
"""
import logging
import traceback
from uuid import uuid1
from allauth.account.models import EmailAddress
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from django.db.models import Q
from geonode.base.auth import (
get_or_create_token,
delete_old_tokens,
set_session_token,
remove_session_token)
from geonode.groups.models import GroupProfile
from geonode.groups.conf import settings as groups_settings
from geonode.notifications_helper import send_notification
from .adapters import get_data_extractor
logger = logging.getLogger(__name__)
def _add_user_to_registered_members(user):
    """Join *user* to the registered-members group when auto-assignment is enabled."""
    if not groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
        return
    profile = GroupProfile.objects.filter(
        slug=groups_settings.REGISTERED_MEMBERS_GROUP_NAME).first()
    if profile:
        profile.join(user)
def _remove_user_from_registered_members(user):
    """Remove *user* from the registered-members group when auto-assignment is enabled."""
    if not groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME:
        return
    profile = GroupProfile.objects.filter(
        slug=groups_settings.REGISTERED_MEMBERS_GROUP_NAME).first()
    if profile:
        profile.leave(user)
def do_login(sender, user, request, **kwargs):
    """
    Take action on user login. Generate a new user access_token to be shared
    with GeoServer, and store it into the request.session
    """
    if user and user.is_authenticated:
        token = None
        try:
            token = get_or_create_token(user)
        except Exception:
            # Token machinery failed: fall back to a random hex token and
            # log the traceback at debug level.
            u = uuid1()
            token = u.hex
            tb = traceback.format_exc()
            logger.debug(tb)
        set_session_token(request.session, token)
    if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_AT == 'login':
        _add_user_to_registered_members(user)
def do_logout(sender, user, request, **kwargs):
    """On logout, delete the user's old access tokens and drop the session token."""
    if 'access_token' in request.session:
        try:
            delete_old_tokens(user)
        except Exception:
            # Token cleanup is best-effort; just log the traceback.
            tb = traceback.format_exc()
            logger.debug(tb)
        remove_session_token(request.session)
        request.session.modified = True
def update_user_email_addresses(sender, **kwargs):
    """
    When a social login completes, record the email address reported by the
    provider as an (unconfirmed) allauth EmailAddress for the user.
    """
    sociallogin = kwargs["sociallogin"]
    user = sociallogin.user
    extractor = get_data_extractor(sociallogin.account.provider)
    try:
        sociallogin_email = extractor.extract_email(
            sociallogin.account.extra_data)
    except NotImplementedError:
        # This provider's extractor does not expose an email address.
        sociallogin_email = None
    if sociallogin_email is not None:
        try:
            EmailAddress.objects.add_email(
                request=None, user=user, email=sociallogin_email, confirm=False)
        except IntegrityError:
            # Use the module-level logger (previously this called the root
            # ``logging`` module directly, inconsistent with the rest of the
            # module) and lazy %-formatting instead of an eager f-string.
            logger.exception(
                "Could not add email address %s to user %s",
                sociallogin_email, user)
def notify_admins_new_signup(sender, **kwargs):
    """Send an 'account_approve' notification about a new signup to active staff/superusers."""
    staff = get_user_model().objects.filter(Q(is_active=True) & (Q(is_staff=True) | Q(is_superuser=True)))
    send_notification(
        users=staff,
        label="account_approve",
        extra_context={"from_user": kwargs["user"]}
    )
    if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_AT == 'registration':
        _add_user_to_registered_members(kwargs["user"])
def profile_post_save(instance, sender, **kwargs):
    """
    Make sure the user belongs by default to the anonymous and contributors groups.
    This will make sure that anonymous and contributors permissions will be granted to the new users.
    """
    from django.contrib.auth.models import Group
    created = kwargs.get('created', False)
    if created:
        anon_group, _ = Group.objects.get_or_create(name='anonymous')
        instance.groups.add(anon_group)
        is_anonymous = instance.username == 'AnonymousUser'
        # Only real human users join 'contributors' (and only if it exists).
        if Group.objects.filter(name='contributors').count() and not (instance.is_staff or instance.is_superuser or is_anonymous):
            cont_group = Group.objects.get(name='contributors')
            instance.groups.add(cont_group)
    if groups_settings.AUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_AT == 'activation':
        # NOTE(review): relies on instance._previous_active_state being set
        # elsewhere (presumably a pre_save handler) -- confirm.
        became_active = instance.is_active and (not instance._previous_active_state or created)
        if became_active:
            _add_user_to_registered_members(instance)
        elif not instance.is_active:
            _remove_user_from_registered_members(instance)
    # do not create email, when user-account signup code is in use
    if getattr(instance, '_disable_account_creation', False):
        return
| gpl-3.0 |
michaelBenin/sqlalchemy | test/orm/test_cascade.py | 3 | 97256 |
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy import Integer, String, ForeignKey, Sequence, \
exc as sa_exc
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session, \
sessionmaker, class_mapper, backref, Session, util as orm_util,\
configure_mappers
from sqlalchemy.orm.attributes import instance_state
from sqlalchemy.orm import attributes, exc as orm_exc, object_mapper
from sqlalchemy import testing
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
class CascadeArgTest(fixtures.MappedTest):
    """Validation and post-construction mutation of relationship()'s ``cascade`` argument."""
    run_inserts = None
    run_create_tables = None
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(30), nullable=False),
        )
        Table('addresses', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('user_id', Integer, ForeignKey('users.id')),
            Column('email_address', String(50), nullable=False),
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Basic):
            pass
        class Address(cls.Basic):
            pass
    def test_delete_with_passive_deletes_all(self):
        # passive_deletes="all" tells the ORM never to touch child rows,
        # which contradicts delete/delete-orphan cascade; mapper
        # configuration must refuse the combination.
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        mapper(User, users, properties={
            'addresses': relationship(Address,
                passive_deletes="all", cascade="all, delete-orphan")
        })
        mapper(Address, addresses)
        assert_raises_message(
            sa_exc.ArgumentError,
            "On User.addresses, can't set passive_deletes='all' "
            "in conjunction with 'delete' or 'delete-orphan' cascade",
            configure_mappers
        )
    def test_delete_orphan_without_delete(self):
        # delete-orphan without delete only warns, it does not raise.
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        assert_raises_message(
            sa_exc.SAWarning,
            "The 'delete-orphan' cascade option requires 'delete'.",
            relationship, Address, cascade="save-update, delete-orphan"
        )
    def test_bad_cascade(self):
        addresses, Address = self.tables.addresses, self.classes.Address
        mapper(Address, addresses)
        assert_raises_message(
            sa_exc.ArgumentError,
            r"Invalid cascade option\(s\): 'fake', 'fake2'",
            relationship, Address, cascade="fake, all, delete-orphan, fake2"
        )
    def test_cascade_repr(self):
        eq_(
            repr(orm_util.CascadeOptions("all, delete-orphan")),
            "CascadeOptions('delete,delete-orphan,expunge,"
            "merge,refresh-expire,save-update')"
        )
    def test_cascade_immutable(self):
        assert isinstance(
            orm_util.CascadeOptions("all, delete-orphan"),
            frozenset)
    def test_cascade_assignable(self):
        # ``cascade`` may be reassigned after construction, even after
        # mappers are configured; assigning delete-orphan registers the
        # relationship in the target mapper's _delete_orphans.
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        rel = relationship(Address)
        eq_(rel.cascade, set(['save-update', 'merge']))
        rel.cascade = "save-update, merge, expunge"
        eq_(rel.cascade, set(['save-update', 'merge', 'expunge']))
        mapper(User, users, properties={
            'addresses': rel
        })
        am = mapper(Address, addresses)
        configure_mappers()
        eq_(rel.cascade, set(['save-update', 'merge', 'expunge']))
        assert ("addresses", User) not in am._delete_orphans
        rel.cascade = "all, delete, delete-orphan"
        assert ("addresses", User) in am._delete_orphans
        eq_(rel.cascade,
            set(['delete', 'delete-orphan', 'expunge', 'merge',
                'refresh-expire', 'save-update'])
        )
class O2MCascadeDeleteOrphanTest(fixtures.MappedTest):
run_inserts = None
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(30), nullable=False),
)
Table('addresses', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('users.id')),
Column('email_address', String(50), nullable=False),
)
Table('orders', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
Column('description', String(30)),
)
Table("dingalings", metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('address_id', Integer, ForeignKey('addresses.id')),
Column('data', String(30))
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
class Order(cls.Comparable):
pass
class Dingaling(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
users, Dingaling, Order, User, dingalings, Address, orders, addresses = (cls.tables.users,
cls.classes.Dingaling,
cls.classes.Order,
cls.classes.User,
cls.tables.dingalings,
cls.classes.Address,
cls.tables.orders,
cls.tables.addresses)
mapper(Address, addresses)
mapper(Order, orders)
mapper(User, users, properties={
'addresses':relationship(Address,
cascade='all, delete-orphan', backref='user'),
'orders':relationship(Order,
cascade='all, delete-orphan', order_by=orders.c.id)
})
mapper(Dingaling, dingalings, properties={
'address' : relationship(Address)
})
def test_list_assignment_new(self):
User, Order = self.classes.User, self.classes.Order
sess = Session()
u = User(name='jack', orders=[
Order(description='order 1'),
Order(description='order 2')])
sess.add(u)
sess.commit()
eq_(u, User(name='jack',
orders=[Order(description='order 1'),
Order(description='order 2')]))
def test_list_assignment_replace(self):
User, Order = self.classes.User, self.classes.Order
sess = Session()
u = User(name='jack', orders=[
Order(description='someorder'),
Order(description='someotherorder')])
sess.add(u)
u.orders=[Order(description="order 3"), Order(description="order 4")]
sess.commit()
eq_(u, User(name='jack',
orders=[Order(description="order 3"),
Order(description="order 4")]))
# order 1, order 2 have been deleted
eq_(sess.query(Order).order_by(Order.id).all(),
[Order(description="order 3"), Order(description="order 4")])
def test_standalone_orphan(self):
Order = self.classes.Order
sess = Session()
o5 = Order(description="order 5")
sess.add(o5)
assert_raises(sa_exc.DBAPIError, sess.flush)
def test_save_update_sends_pending(self):
"""test that newly added and deleted collection items are
cascaded on save-update"""
Order, User = self.classes.Order, self.classes.User
sess = sessionmaker(expire_on_commit=False)()
o1, o2, o3 = Order(description='o1'), Order(description='o2'), \
Order(description='o3')
u = User(name='jack', orders=[o1, o2])
sess.add(u)
sess.commit()
sess.close()
u.orders.append(o3)
u.orders.remove(o1)
sess.add(u)
assert o1 in sess
assert o2 in sess
assert o3 in sess
sess.commit()
def test_remove_pending_from_collection(self):
User, Order = self.classes.User, self.classes.Order
sess = Session()
u = User(name='jack')
sess.add(u)
sess.commit()
o1 = Order()
u.orders.append(o1)
assert o1 in sess
u.orders.remove(o1)
assert o1 not in sess
def test_delete(self):
User, users, orders, Order = (self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order)
sess = create_session()
u = User(name='jack',
orders=[Order(description='someorder'),
Order(description='someotherorder')])
sess.add(u)
sess.flush()
sess.delete(u)
sess.flush()
assert users.count().scalar() == 0
assert orders.count().scalar() == 0
def test_delete_unloaded_collections(self):
"""Unloaded collections are still included in a delete-cascade
by default."""
User, addresses, users, Address = (self.classes.User,
self.tables.addresses,
self.tables.users,
self.classes.Address)
sess = create_session()
u = User(name='jack',
addresses=[Address(email_address="address1"),
Address(email_address="address2")])
sess.add(u)
sess.flush()
sess.expunge_all()
assert addresses.count().scalar() == 2
assert users.count().scalar() == 1
u = sess.query(User).get(u.id)
assert 'addresses' not in u.__dict__
sess.delete(u)
sess.flush()
assert addresses.count().scalar() == 0
assert users.count().scalar() == 0
def test_cascades_onlycollection(self):
"""Cascade only reaches instances that are still part of the
collection, not those that have been removed"""
User, Order, users, orders = (self.classes.User,
self.classes.Order,
self.tables.users,
self.tables.orders)
sess = create_session()
u = User(name='jack',
orders=[Order(description='someorder'),
Order(description='someotherorder')])
sess.add(u)
sess.flush()
o = u.orders[0]
del u.orders[0]
sess.delete(u)
assert u in sess.deleted
assert o not in sess.deleted
assert o in sess
u2 = User(name='newuser', orders=[o])
sess.add(u2)
sess.flush()
sess.expunge_all()
assert users.count().scalar() == 1
assert orders.count().scalar() == 1
eq_(sess.query(User).all(),
[User(name='newuser',
orders=[Order(description='someorder')])])
def test_cascade_nosideeffects(self):
"""test that cascade leaves the state of unloaded
scalars/collections unchanged."""
Dingaling, User, Address = (self.classes.Dingaling,
self.classes.User,
self.classes.Address)
sess = create_session()
u = User(name='jack')
sess.add(u)
assert 'orders' not in u.__dict__
sess.flush()
assert 'orders' not in u.__dict__
a = Address(email_address='foo@bar.com')
sess.add(a)
assert 'user' not in a.__dict__
a.user = u
sess.flush()
d = Dingaling(data='d1')
d.address_id = a.id
sess.add(d)
assert 'address' not in d.__dict__
sess.flush()
assert d.address is a
def test_cascade_delete_plusorphans(self):
User, users, orders, Order = (self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order)
sess = create_session()
u = User(name='jack',
orders=[Order(description='someorder'),
Order(description='someotherorder')])
sess.add(u)
sess.flush()
assert users.count().scalar() == 1
assert orders.count().scalar() == 2
del u.orders[0]
sess.delete(u)
sess.flush()
assert users.count().scalar() == 0
assert orders.count().scalar() == 0
    def test_collection_orphans(self):
        """Emptying the collection deletes the now-orphaned orders
        while the parent row itself remains."""
        User, users, orders, Order = (self.classes.User,
                                self.tables.users,
                                self.tables.orders,
                                self.classes.Order)
        sess = create_session()
        u = User(name='jack',
                 orders=[Order(description='someorder'),
                         Order(description='someotherorder')])
        sess.add(u)
        sess.flush()
        assert users.count().scalar() == 1
        assert orders.count().scalar() == 2
        # slice-assign empties the collection in place
        u.orders[:] = []
        sess.flush()
        assert users.count().scalar() == 1
        assert orders.count().scalar() == 0
class O2MCascadeTest(fixtures.MappedTest):
    """One-to-many User->Address with a plain backref; tests that a
    None element in the collection fails at flush time with a clear
    message and does not corrupt the in-memory collection."""
    run_inserts = None
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('name', String(30), nullable=False),
        )
        Table('addresses', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey('users.id')),
              Column('email_address', String(50), nullable=False),
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass
        class Address(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        users, User, Address, addresses = (
                    cls.tables.users, cls.classes.User,
                    cls.classes.Address, cls.tables.addresses)
        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses':relationship(Address, backref="user"),
        })
    def test_none_o2m_collection_assignment(self):
        # None supplied directly in the constructor's collection
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        u1 = User(name='u', addresses=[None])
        s.add(u1)
        eq_(u1.addresses, [None])
        assert_raises_message(
            orm_exc.FlushError,
            "Can't flush None value found in collection User.addresses",
            s.commit
        )
        # collection is left untouched after the failed flush
        eq_(u1.addresses, [None])
    def test_none_o2m_collection_append(self):
        # None appended after construction behaves the same way
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        u1 = User(name='u')
        s.add(u1)
        u1.addresses.append(None)
        eq_(u1.addresses, [None])
        assert_raises_message(
            orm_exc.FlushError,
            "Can't flush None value found in collection User.addresses",
            s.commit
        )
        eq_(u1.addresses, [None])
class O2MCascadeDeleteNoOrphanTest(fixtures.MappedTest):
    """One-to-many with cascade="all" but no delete-orphan: a child
    removed from the collection before the parent is deleted must
    survive the flush."""
    run_inserts = None
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('name', String(30))
        )
        Table('orders', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey('users.id')),
              Column('description', String(30))
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass
        class Order(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        User, Order, orders, users = (cls.classes.User,
                                cls.classes.Order,
                                cls.tables.orders,
                                cls.tables.users)
        # "all" includes delete but deliberately NOT delete-orphan
        mapper(User, users, properties = dict(
            orders = relationship(
                mapper(Order, orders), cascade="all")
        ))
    def test_cascade_delete_noorphans(self):
        User, Order, orders, users = (self.classes.User,
                                self.classes.Order,
                                self.tables.orders,
                                self.tables.users)
        sess = create_session()
        u = User(name='jack',
                 orders=[Order(description='someorder'),
                         Order(description='someotherorder')])
        sess.add(u)
        sess.flush()
        assert users.count().scalar() == 1
        assert orders.count().scalar() == 2
        # detach one order, then delete the parent
        del u.orders[0]
        sess.delete(u)
        sess.flush()
        assert users.count().scalar() == 0
        # the detached order remains: no delete-orphan cascade
        assert orders.count().scalar() == 1
class O2OSingleParentTest(_fixtures.FixtureTest):
    """One-to-one (uselist=False) with single_parent=True on the
    backref: an Address may belong to only one User at a time."""
    run_inserts = None
    @classmethod
    def setup_mappers(cls):
        Address, addresses, users, User = (cls.classes.Address,
                                cls.tables.addresses,
                                cls.tables.users,
                                cls.classes.User)
        mapper(Address, addresses)
        mapper(User, users, properties={'address'
               : relationship(Address, backref=backref('user',
               single_parent=True), uselist=False)})
    def test_single_parent_raise(self):
        User, Address = self.classes.User, self.classes.Address
        a1 = Address(email_address='some address')
        u1 = User(name='u1', address=a1)
        # attaching u1 to a second Address violates single_parent
        assert_raises(sa_exc.InvalidRequestError, Address,
                      email_address='asd', user=u1)
        a2 = Address(email_address='asd')
        # direct replacement is fine; the old address is de-associated
        u1.address = a2
        assert u1.address is not a1
        assert a1.user is None
class O2OSingleParentNoFlushTest(fixtures.MappedTest):
    """Regression test for [ticket:2921]: replacing a single_parent,
    delete-orphan o2o attribute must not fail before flush."""
    run_inserts = None
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('name', String(30), nullable=False),
        )
        Table('addresses', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('user_id', None, ForeignKey('users.id'), nullable=False),
              Column('email_address', String(50), nullable=False),
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass
        class Address(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        Address, addresses, users, User = (cls.classes.Address,
                                cls.tables.addresses,
                                cls.tables.users,
                                cls.classes.User)
        mapper(Address, addresses)
        mapper(User, users, properties={'address'
               : relationship(Address, backref=backref('user',
                    single_parent=True, cascade="all, delete-orphan"),
                    uselist=False)})
    def test_replace_attribute_no_flush(self):
        # test [ticket:2921]
        User, Address = self.classes.User, self.classes.Address
        a1 = Address(email_address='some address')
        u1 = User(name='u1', address=a1)
        sess = Session()
        sess.add(u1)
        sess.commit()
        a2 = Address(email_address='asdf')
        sess.add(a2)
        # replacing the address must not raise even though a1 becomes
        # an orphan pending deletion
        u1.address = a2
class NoSaveCascadeFlushTest(_fixtures.FixtureTest):
    """Test related item not present in session, commit proceeds."""
    run_inserts = None
    def _one_to_many_fixture(self, o2m_cascade=True,
                                    m2o_cascade=True,
                                    o2m=False,
                                    m2o=False,
                                    o2m_cascade_backrefs=True,
                                    m2o_cascade_backrefs=True):
        """Map User/Address with a configurable o2m and/or m2o
        relationship; each direction's save-update cascade and
        cascade_backrefs flag can be toggled independently."""
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)
        if o2m:
            if m2o:
                addresses_rel = {'addresses':relationship(
                                    Address,
                                    cascade_backrefs=o2m_cascade_backrefs,
                                    cascade=o2m_cascade and 'save-update' or '',
                                    backref=backref('user',
                                            cascade=m2o_cascade and 'save-update' or '',
                                            cascade_backrefs=m2o_cascade_backrefs
                                        )
                                )}
            else:
                addresses_rel = {'addresses':relationship(
                                    Address,
                                    cascade=o2m_cascade and 'save-update' or '',
                                    cascade_backrefs=o2m_cascade_backrefs,
                                )}
            user_rel = {}
        elif m2o:
            user_rel = {'user':relationship(User,
                            cascade=m2o_cascade and 'save-update' or '',
                            cascade_backrefs=m2o_cascade_backrefs
                        )}
            addresses_rel = {}
        else:
            addresses_rel = {}
            user_rel = {}
        mapper(User, users, properties=addresses_rel)
        mapper(Address, addresses, properties=user_rel)
    def _many_to_many_fixture(self, fwd_cascade=True,
                                    bkd_cascade=True,
                                    fwd=False,
                                    bkd=False,
                                    fwd_cascade_backrefs=True,
                                    bkd_cascade_backrefs=True):
        """Map Item/Keyword m2m analogously to _one_to_many_fixture:
        forward (Item->Keyword) and backward (Keyword->Item) sides with
        toggleable save-update cascade and cascade_backrefs."""
        keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
                                self.tables.items,
                                self.tables.item_keywords,
                                self.classes.Keyword,
                                self.classes.Item)
        if fwd:
            if bkd:
                keywords_rel = {'keywords':relationship(
                                    Keyword,
                                    secondary=item_keywords,
                                    cascade_backrefs=fwd_cascade_backrefs,
                                    cascade=fwd_cascade and 'save-update' or '',
                                    backref=backref('items',
                                            cascade=bkd_cascade and 'save-update' or '',
                                            cascade_backrefs=bkd_cascade_backrefs
                                        )
                                )}
            else:
                keywords_rel = {'keywords':relationship(
                                    Keyword,
                                    secondary=item_keywords,
                                    cascade=fwd_cascade and 'save-update' or '',
                                    cascade_backrefs=fwd_cascade_backrefs,
                                )}
            items_rel = {}
        elif bkd:
            items_rel = {'items':relationship(Item,
                            secondary=item_keywords,
                            cascade=bkd_cascade and 'save-update' or '',
                            cascade_backrefs=bkd_cascade_backrefs
                        )}
            keywords_rel = {}
        else:
            keywords_rel = {}
            items_rel = {}
        mapper(Item, items, properties=keywords_rel)
        mapper(Keyword, keywords, properties=items_rel)
    def test_o2m_only_child_pending(self):
        # save-update cascade pulls the pending child into the session
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        u1.addresses.append(a1)
        sess.add(u1)
        assert u1 in sess
        assert a1 in sess
        sess.flush()
    def test_o2m_only_child_transient(self):
        # no cascade: transient child stays out, flush warns
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=False, o2m_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        u1.addresses.append(a1)
        sess.add(u1)
        assert u1 in sess
        assert a1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_o2m_only_child_persistent(self):
        # child was flushed then expunged; still warns without cascade
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=False, o2m_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        sess.add(a1)
        sess.flush()
        sess.expunge_all()
        u1.addresses.append(a1)
        sess.add(u1)
        assert u1 in sess
        assert a1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_o2m_backref_child_pending(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        u1.addresses.append(a1)
        sess.add(u1)
        assert u1 in sess
        assert a1 in sess
        sess.flush()
    def test_o2m_backref_child_transient(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True,
                                    o2m_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        u1.addresses.append(a1)
        sess.add(u1)
        assert u1 in sess
        assert a1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_o2m_backref_child_transient_nochange(self):
        # commit succeeds (with warning); relationship simply not saved
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True,
                                    o2m_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        u1.addresses.append(a1)
        sess.add(u1)
        assert u1 in sess
        assert a1 not in sess
        @testing.emits_warning(r'.*not in session')
        def go():
            sess.commit()
        go()
        eq_(u1.addresses, [])
    def test_o2m_backref_child_expunged(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True,
                                    o2m_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        sess.add(a1)
        sess.flush()
        sess.add(u1)
        u1.addresses.append(a1)
        sess.expunge(a1)
        assert u1 in sess
        assert a1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_o2m_backref_child_expunged_nochange(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True,
                                    o2m_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        sess.add(a1)
        sess.flush()
        sess.add(u1)
        u1.addresses.append(a1)
        sess.expunge(a1)
        assert u1 in sess
        assert a1 not in sess
        @testing.emits_warning(r'.*not in session')
        def go():
            sess.commit()
        go()
        eq_(u1.addresses, [])
    def test_m2o_only_child_pending(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=False, m2o=True)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        assert u1 in sess
        assert a1 in sess
        sess.flush()
    def test_m2o_only_child_transient(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=False, m2o=True, m2o_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        assert u1 not in sess
        assert a1 in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2o_only_child_expunged(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=False, m2o=True, m2o_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        sess.expunge(u1)
        assert u1 not in sess
        assert a1 in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2o_backref_child_pending(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        assert u1 in sess
        assert a1 in sess
        sess.flush()
    def test_m2o_backref_child_transient(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        assert u1 not in sess
        assert a1 in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2o_backref_child_expunged(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        sess.expunge(u1)
        assert u1 not in sess
        assert a1 in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2o_backref_child_pending_nochange(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        assert u1 not in sess
        assert a1 in sess
        @testing.emits_warning(r'.*not in session')
        def go():
            sess.commit()
        go()
        # didn't get flushed
        assert a1.user is None
    def test_m2o_backref_child_expunged_nochange(self):
        User, Address = self.classes.User, self.classes.Address
        self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False)
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        a1 = Address(email_address='a1')
        a1.user = u1
        sess.add(a1)
        sess.expunge(u1)
        assert u1 not in sess
        assert a1 in sess
        @testing.emits_warning(r'.*not in session')
        def go():
            sess.commit()
        go()
        # didn't get flushed
        assert a1.user is None
    def test_m2m_only_child_pending(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        i1.keywords.append(k1)
        sess.add(i1)
        assert i1 in sess
        assert k1 in sess
        sess.flush()
    def test_m2m_only_child_transient(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=False, fwd_cascade=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        i1.keywords.append(k1)
        sess.add(i1)
        assert i1 in sess
        assert k1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2m_only_child_persistent(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=False, fwd_cascade=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        sess.add(k1)
        sess.flush()
        sess.expunge_all()
        i1.keywords.append(k1)
        sess.add(i1)
        assert i1 in sess
        assert k1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2m_backref_child_pending(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=True)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        i1.keywords.append(k1)
        sess.add(i1)
        assert i1 in sess
        assert k1 in sess
        sess.flush()
    def test_m2m_backref_child_transient(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=True,
                                    fwd_cascade=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        i1.keywords.append(k1)
        sess.add(i1)
        assert i1 in sess
        assert k1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2m_backref_child_transient_nochange(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=True,
                                    fwd_cascade=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        i1.keywords.append(k1)
        sess.add(i1)
        assert i1 in sess
        assert k1 not in sess
        @testing.emits_warning(r'.*not in session')
        def go():
            sess.commit()
        go()
        eq_(i1.keywords, [])
    def test_m2m_backref_child_expunged(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=True,
                                    fwd_cascade=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        sess.add(k1)
        sess.flush()
        sess.add(i1)
        i1.keywords.append(k1)
        sess.expunge(k1)
        assert i1 in sess
        assert k1 not in sess
        assert_raises_message(
            sa_exc.SAWarning, "not in session", sess.flush
        )
    def test_m2m_backref_child_expunged_nochange(self):
        Item, Keyword = self.classes.Item, self.classes.Keyword
        self._many_to_many_fixture(fwd=True, bkd=True,
                                    fwd_cascade=False)
        sess = Session()
        i1 = Item(description='i1')
        k1 = Keyword(name='k1')
        sess.add(k1)
        sess.flush()
        sess.add(i1)
        i1.keywords.append(k1)
        sess.expunge(k1)
        assert i1 in sess
        assert k1 not in sess
        @testing.emits_warning(r'.*not in session')
        def go():
            sess.commit()
        go()
        eq_(i1.keywords, [])
class NoSaveCascadeBackrefTest(_fixtures.FixtureTest):
    """test that backrefs don't force save-update cascades to occur
    when the cascade initiated from the forwards side."""
    def test_unidirectional_cascade_o2m(self):
        User, Order, users, orders = (self.classes.User,
                                self.classes.Order,
                                self.tables.users,
                                self.tables.orders)
        mapper(Order, orders)
        # backref 'user' carries no cascade at all
        mapper(User, users, properties = dict(
            orders = relationship(
                Order, backref=backref("user", cascade=None))
        ))
        sess = create_session()
        o1 = Order()
        sess.add(o1)
        # assigning via the collection does not cascade to the new User
        u1 = User(orders=[o1])
        assert u1 not in sess
        assert o1 in sess
        sess.expunge_all()
        o1 = Order()
        u1 = User(orders=[o1])
        sess.add(o1)
        assert u1 not in sess
        assert o1 in sess
    def test_unidirectional_cascade_m2o(self):
        User, Order, users, orders = (self.classes.User,
                                self.classes.Order,
                                self.tables.users,
                                self.tables.orders)
        # backref 'orders' carries no cascade
        mapper(Order, orders, properties={
            'user':relationship(User, backref=backref("orders", cascade=None))
        })
        mapper(User, users)
        sess = create_session()
        u1 = User()
        sess.add(u1)
        o1 = Order()
        # the backref appends o1 to u1.orders, but no cascade fires
        o1.user = u1
        assert o1 not in sess
        assert u1 in sess
        sess.expunge_all()
        u1 = User()
        o1 = Order()
        o1.user = u1
        sess.add(u1)
        assert o1 not in sess
        assert u1 in sess
    def test_unidirectional_cascade_m2m(self):
        keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
                                self.tables.items,
                                self.tables.item_keywords,
                                self.classes.Keyword,
                                self.classes.Item)
        # cascade='none' disables save-update in both m2m directions
        mapper(Item, items, properties={'keywords'
               : relationship(Keyword, secondary=item_keywords,
               cascade='none', backref='items')})
        mapper(Keyword, keywords)
        sess = create_session()
        i1 = Item()
        k1 = Keyword()
        sess.add(i1)
        i1.keywords.append(k1)
        assert i1 in sess
        assert k1 not in sess
        sess.expunge_all()
        i1 = Item()
        k1 = Keyword()
        sess.add(i1)
        # appending via the backref likewise does not cascade
        k1.items.append(i1)
        assert i1 in sess
        assert k1 not in sess
class M2OCascadeDeleteOrphanTestOne(fixtures.MappedTest):
    """Many-to-one User->Pref with 'all, delete-orphan' and
    single_parent, plus Pref->Extra with 'all, delete': de-associating
    a Pref from its User deletes it and its Extras."""
    @classmethod
    def define_tables(cls, metadata):
        Table('extra', metadata, Column('id', Integer,
              primary_key=True, test_needs_autoincrement=True),
              Column('prefs_id', Integer, ForeignKey('prefs.id')))
        Table('prefs', metadata, Column('id', Integer,
              primary_key=True, test_needs_autoincrement=True),
              Column('data', String(40)))
        Table(
            'users',
            metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('name', String(40)),
            Column('pref_id', Integer, ForeignKey('prefs.id')),
            Column('foo_id', Integer, ForeignKey('foo.id')),
            )
        Table('foo', metadata, Column('id', Integer, primary_key=True,
              test_needs_autoincrement=True), Column('data',
              String(40)))
    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass
        class Pref(cls.Comparable):
            pass
        class Extra(cls.Comparable):
            pass
        class Foo(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        extra, foo, users, Extra, Pref, User, prefs, Foo = (cls.tables.extra,
                                cls.tables.foo,
                                cls.tables.users,
                                cls.classes.Extra,
                                cls.classes.Pref,
                                cls.classes.User,
                                cls.tables.prefs,
                                cls.classes.Foo)
        mapper(Extra, extra)
        mapper(Pref, prefs, properties=dict(extra=relationship(Extra,
               cascade='all, delete')))
        mapper(User, users, properties=dict(pref=relationship(Pref,
               lazy='joined', cascade='all, delete-orphan',
               single_parent=True), foo=relationship(Foo)))  # straight m2o
        mapper(Foo, foo)
    @classmethod
    def insert_data(cls):
        # three users, each with its own Pref and one Extra per Pref
        Pref, User, Extra = (cls.classes.Pref,
                                cls.classes.User,
                                cls.classes.Extra)
        u1 = User(name='ed', pref=Pref(data="pref 1", extra=[Extra()]))
        u2 = User(name='jack', pref=Pref(data="pref 2", extra=[Extra()]))
        u3 = User(name="foo", pref=Pref(data="pref 3", extra=[Extra()]))
        sess = create_session()
        sess.add_all((u1, u2, u3))
        sess.flush()
        sess.close()
    def test_orphan(self):
        # setting pref to None orphans it; it and its Extra are deleted
        prefs, User, extra = (self.tables.prefs,
                                self.classes.User,
                                self.tables.extra)
        sess = create_session()
        assert prefs.count().scalar() == 3
        assert extra.count().scalar() == 3
        jack = sess.query(User).filter_by(name="jack").one()
        jack.pref = None
        sess.flush()
        assert prefs.count().scalar() == 2
        assert extra.count().scalar() == 2
    def test_cascade_on_deleted(self):
        """test a bug introduced by r6711"""
        Foo, User = self.classes.Foo, self.classes.User
        sess = sessionmaker(expire_on_commit=True)()
        u1 = User(name='jack', foo=Foo(data='f1'))
        sess.add(u1)
        sess.commit()
        u1.foo = None
        # the error condition relies upon
        # these things being true
        assert User.foo.dispatch._active_history is False
        eq_(
            attributes.get_history(u1, 'foo'),
            ([None], (), ())
        )
        sess.add(u1)
        assert u1 in sess
        sess.commit()
    def test_save_update_sends_pending(self):
        """test that newly added and deleted scalar items are cascaded
        on save-update"""
        Pref, User = self.classes.Pref, self.classes.User
        sess = sessionmaker(expire_on_commit=False)()
        p1, p2 = Pref(data='p1'), Pref(data='p2')
        u = User(name='jack', pref=p1)
        sess.add(u)
        sess.commit()
        sess.close()
        # replacing the pref while detached; re-adding u cascades both
        # the new pending p2 and the orphaned p1 into the session
        u.pref = p2
        sess.add(u)
        assert p1 in sess
        assert p2 in sess
        sess.commit()
    def test_orphan_on_update(self):
        # orphaning done while detached is honored after re-adding
        prefs, User, extra = (self.tables.prefs,
                                self.classes.User,
                                self.tables.extra)
        sess = create_session()
        jack = sess.query(User).filter_by(name="jack").one()
        p = jack.pref
        e = jack.pref.extra[0]
        sess.expunge_all()
        jack.pref = None
        sess.add(jack)
        sess.add(p)
        sess.add(e)
        assert p in sess
        assert e in sess
        sess.flush()
        assert prefs.count().scalar() == 2
        assert extra.count().scalar() == 2
    def test_pending_expunge(self):
        # a pending orphan is expunged when replaced before flush
        Pref, User = self.classes.Pref, self.classes.User
        sess = create_session()
        someuser = User(name='someuser')
        sess.add(someuser)
        sess.flush()
        someuser.pref = p1 = Pref(data='somepref')
        assert p1 in sess
        someuser.pref = Pref(data='someotherpref')
        assert p1 not in sess
        sess.flush()
        eq_(sess.query(Pref).with_parent(someuser).all(),
            [Pref(data="someotherpref")])
    def test_double_assignment(self):
        """Double assignment will not accidentally reset the 'parent' flag."""
        Pref, User = self.classes.Pref, self.classes.User
        sess = create_session()
        jack = sess.query(User).filter_by(name="jack").one()
        newpref = Pref(data="newpref")
        jack.pref = newpref
        jack.pref = newpref
        sess.flush()
        eq_(sess.query(Pref).order_by(Pref.id).all(),
            [Pref(data="pref 1"), Pref(data="pref 3"), Pref(data="newpref")])
class M2OCascadeDeleteOrphanTestTwo(fixtures.MappedTest):
    """Chained many-to-one T1->T2->T3, each with 'all, delete-orphan'
    and single_parent: orphaning and deletion propagate down the
    chain."""
    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)),
              Column('t2id', Integer, ForeignKey('t2.id')))
        Table('t2', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)),
              Column('t3id', Integer, ForeignKey('t3.id')))
        Table('t3', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)))
    @classmethod
    def setup_classes(cls):
        class T1(cls.Comparable):
            pass
        class T2(cls.Comparable):
            pass
        class T3(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        t2, T2, T3, t1, t3, T1 = (cls.tables.t2,
                                cls.classes.T2,
                                cls.classes.T3,
                                cls.tables.t1,
                                cls.tables.t3,
                                cls.classes.T1)
        mapper(T1, t1, properties=dict(t2=relationship(T2,
               cascade='all, delete-orphan', single_parent=True)))
        mapper(T2, t2, properties=dict(t3=relationship(T3,
               cascade='all, delete-orphan', single_parent=True,
               backref=backref('t2', uselist=False))))
        mapper(T3, t3)
    def test_cascade_delete(self):
        # deleting the root deletes the whole chain
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a')))
        sess.add(x)
        sess.flush()
        sess.delete(x)
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [])
    def test_deletes_orphans_onelevel(self):
        # orphaning t2 before deleting t1 still removes t2 and t3
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x2 = T1(data='t1b', t2=T2(data='t2b', t3=T3(data='t3b')))
        sess.add(x2)
        sess.flush()
        x2.t2 = None
        sess.delete(x2)
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [])
    def test_deletes_orphans_twolevel(self):
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a')))
        sess.add(x)
        sess.flush()
        x.t2.t3 = None
        sess.delete(x)
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [])
    def test_finds_orphans_twolevel(self):
        # orphaning only t3 (no delete of the root) removes just t3
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a')))
        sess.add(x)
        sess.flush()
        x.t2.t3 = None
        sess.flush()
        eq_(sess.query(T1).all(), [T1()])
        eq_(sess.query(T2).all(), [T2()])
        eq_(sess.query(T3).all(), [])
    def test_single_parent_raise(self):
        T2, T1 = self.classes.T2, self.classes.T1
        sess = create_session()
        y = T2(data='T2a')
        x = T1(data='T1a', t2=y)
        # attaching y to a second T1 violates single_parent
        assert_raises(sa_exc.InvalidRequestError, T1, data='T1b', t2=y)
    def test_single_parent_backref(self):
        T2, T3 = self.classes.T2, self.classes.T3
        sess = create_session()
        y = T3(data='T3a')
        x = T2(data='T2a', t3=y)
        # can't attach the T3 to another T2
        assert_raises(sa_exc.InvalidRequestError, T2, data='T2b', t3=y)
        # set via backref tho is OK, unsets from previous parent
        # first
        z = T2(data='T2b')
        y.t2 = z
        assert z.t3 is y
        assert x.t3 is None
class M2OCascadeDeleteNoOrphanTest(fixtures.MappedTest):
    """Chained many-to-one T1->T2->T3 with cascade="all" but no
    delete-orphan: delete cascades down the chain, while de-associated
    children are preserved."""
    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata, Column('id', Integer, primary_key=True,
              test_needs_autoincrement=True),
              Column('data',String(50)),
              Column('t2id', Integer, ForeignKey('t2.id')))
        Table('t2', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data',String(50)),
              Column('t3id', Integer, ForeignKey('t3.id')))
        Table('t3', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)))
    @classmethod
    def setup_classes(cls):
        class T1(cls.Comparable):
            pass
        class T2(cls.Comparable):
            pass
        class T3(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        t2, T2, T3, t1, t3, T1 = (cls.tables.t2,
                                cls.classes.T2,
                                cls.classes.T3,
                                cls.tables.t1,
                                cls.tables.t3,
                                cls.classes.T1)
        mapper(T1, t1, properties={'t2': relationship(T2, cascade="all")})
        mapper(T2, t2, properties={'t3': relationship(T3, cascade="all")})
        mapper(T3, t3)
    def test_cascade_delete(self):
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a')))
        sess.add(x)
        sess.flush()
        sess.delete(x)
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [])
    def test_cascade_delete_postappend_onelevel(self):
        # children attached AFTER the delete() call are still cascaded
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x1 = T1(data='t1', )
        x2 = T2(data='t2')
        x3 = T3(data='t3')
        sess.add_all((x1, x2, x3))
        sess.flush()
        sess.delete(x1)
        x1.t2 = x2
        x2.t3 = x3
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [])
    def test_cascade_delete_postappend_twolevel(self):
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x1 = T1(data='t1', t2=T2(data='t2'))
        x3 = T3(data='t3')
        sess.add_all((x1, x3))
        sess.flush()
        sess.delete(x1)
        x1.t2.t3 = x3
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [])
    def test_preserves_orphans_onelevel(self):
        # de-associating t2 before deleting t1 preserves t2 and t3
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x2 = T1(data='t1b', t2=T2(data='t2b', t3=T3(data='t3b')))
        sess.add(x2)
        sess.flush()
        x2.t2 = None
        sess.delete(x2)
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [T2()])
        eq_(sess.query(T3).all(), [T3()])
    @testing.future
    def test_preserves_orphans_onelevel_postremove(self):
        # same as above but de-association happens after delete();
        # marked @testing.future (expected to fail currently)
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x2 = T1(data='t1b', t2=T2(data='t2b', t3=T3(data='t3b')))
        sess.add(x2)
        sess.flush()
        sess.delete(x2)
        x2.t2 = None
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [T2()])
        eq_(sess.query(T3).all(), [T3()])
    def test_preserves_orphans_twolevel(self):
        T2, T3, T1 = (self.classes.T2,
                                self.classes.T3,
                                self.classes.T1)
        sess = create_session()
        x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a')))
        sess.add(x)
        sess.flush()
        x.t2.t3 = None
        sess.delete(x)
        sess.flush()
        eq_(sess.query(T1).all(), [])
        eq_(sess.query(T2).all(), [])
        eq_(sess.query(T3).all(), [T3()])
class M2MCascadeTest(fixtures.MappedTest):
    @classmethod
    def define_tables(cls, metadata):
        # a <-> b many-to-many via 'atob'; c is a one-to-many child of b
        Table('a', metadata,
            Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
            Column('data', String(30)),
            test_needs_fk=True
            )
        Table('b', metadata,
            Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
            Column('data', String(30)),
            test_needs_fk=True
            )
        Table('atob', metadata,
            Column('aid', Integer, ForeignKey('a.id')),
            Column('bid', Integer, ForeignKey('b.id')),
            test_needs_fk=True
            )
        Table('c', metadata,
            Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
            Column('data', String(30)),
            Column('bid', Integer, ForeignKey('b.id')),
            test_needs_fk=True
            )
    @classmethod
    def setup_classes(cls):
        # plain comparable entities; mappings are set up per-test
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
        class C(cls.Comparable):
            pass
    def test_delete_orphan(self):
        """Removing a B from the m2m collection deletes the B and its
        association row, leaving A intact."""
        a, A, B, b, atob = (self.tables.a,
                                self.classes.A,
                                self.classes.B,
                                self.tables.b,
                                self.tables.atob)
        # if no backref here, delete-orphan failed until [ticket:427]
        # was fixed
        mapper(A, a, properties={'bs': relationship(B, secondary=atob,
               cascade='all, delete-orphan', single_parent=True)})
        mapper(B, b)
        sess = create_session()
        b1 = B(data='b1')
        a1 = A(data='a1', bs=[b1])
        sess.add(a1)
        sess.flush()
        a1.bs.remove(b1)
        sess.flush()
        assert atob.count().scalar() ==0
        assert b.count().scalar() == 0
        assert a.count().scalar() == 1
    def test_delete_orphan_dynamic(self):
        """Same as test_delete_orphan but with a lazy='dynamic'
        collection."""
        a, A, B, b, atob = (self.tables.a,
                                self.classes.A,
                                self.classes.B,
                                self.tables.b,
                                self.tables.atob)
        mapper(A, a, properties={'bs': relationship(B, secondary=atob,
               cascade='all, delete-orphan', single_parent=True,
               lazy='dynamic')})  # if no backref here, delete-orphan
                                  # failed until [ticket:427] was fixed
        mapper(B, b)
        sess = create_session()
        b1 = B(data='b1')
        a1 = A(data='a1', bs=[b1])
        sess.add(a1)
        sess.flush()
        a1.bs.remove(b1)
        sess.flush()
        assert atob.count().scalar() == 0
        assert b.count().scalar() == 0
        assert a.count().scalar() == 1
    def test_delete_orphan_cascades(self):
        """delete-orphan on the m2m also cascades through B's own
        one-to-many 'cs' relationship, deleting the C rows."""
        a, A, c, b, C, B, atob = (self.tables.a,
                                self.classes.A,
                                self.tables.c,
                                self.tables.b,
                                self.classes.C,
                                self.classes.B,
                                self.tables.atob)
        mapper(A, a, properties={
            # if no backref here, delete-orphan failed until [ticket:427] was
            # fixed
            'bs':relationship(B, secondary=atob, cascade="all, delete-orphan",
                    single_parent=True)
        })
        mapper(B, b, properties={'cs':
                    relationship(C, cascade="all, delete-orphan")})
        mapper(C, c)
        sess = create_session()
        b1 = B(data='b1', cs=[C(data='c1')])
        a1 = A(data='a1', bs=[b1])
        sess.add(a1)
        sess.flush()
        a1.bs.remove(b1)
        sess.flush()
        assert atob.count().scalar() ==0
        assert b.count().scalar() == 0
        assert a.count().scalar() == 1
        assert c.count().scalar() == 0
def test_cascade_delete(self):
a, A, B, b, atob = (self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob)
mapper(A, a, properties={
'bs':relationship(B, secondary=atob, cascade="all, delete-orphan",
single_parent=True)
})
mapper(B, b)
sess = create_session()
a1 = A(data='a1', bs=[B(data='b1')])
sess.add(a1)
sess.flush()
sess.delete(a1)
sess.flush()
assert atob.count().scalar() ==0
assert b.count().scalar() == 0
assert a.count().scalar() == 0
def test_single_parent_error(self):
    """delete-orphan on a plain m2m without single_parent=True is
    rejected at mapper configuration time."""
    a, A, B, b, atob = (self.tables.a,
                        self.classes.A,
                        self.classes.B,
                        self.tables.b,
                        self.tables.atob)

    mapper(A, a, properties={
        'bs': relationship(B, secondary=atob,
                           cascade="all, delete-orphan")
    })
    mapper(B, b)
    # the error is raised lazily, when mappers are configured
    assert_raises_message(
        sa_exc.ArgumentError,
        "On A.bs, delete-orphan cascade is not supported",
        configure_mappers
    )
def test_single_parent_raise(self):
    """with single_parent=True, attaching a child that already has a
    parent to a second parent raises InvalidRequestError."""
    a, A, B, b, atob = (self.tables.a,
                        self.classes.A,
                        self.classes.B,
                        self.tables.b,
                        self.tables.atob)

    mapper(A, a, properties={
        'bs': relationship(B, secondary=atob, cascade="all, delete-orphan",
                           single_parent=True)
    })
    mapper(B, b)
    sess = create_session()
    b1 = B(data='b1')
    a1 = A(data='a1', bs=[b1])

    # b1 already belongs to a1; constructing a second parent with it fails
    assert_raises(sa_exc.InvalidRequestError,
                  A, data='a2', bs=[b1]
                  )
def test_single_parent_backref(self):
    """test that setting m2m via a uselist=False backref bypasses the
    single_parent raise"""
    a, A, B, b, atob = (self.tables.a,
                        self.classes.A,
                        self.classes.B,
                        self.tables.b,
                        self.tables.atob)

    mapper(A, a, properties={
        'bs': relationship(B,
                           secondary=atob,
                           cascade="all, delete-orphan", single_parent=True,
                           backref=backref('a', uselist=False))
    })
    mapper(B, b)
    sess = create_session()
    b1 = B(data='b1')
    a1 = A(data='a1', bs=[b1])

    # direct construction with a second parent still raises...
    assert_raises(
        sa_exc.InvalidRequestError,
        A, data='a2', bs=[b1]
    )

    # ...but assignment through the scalar backref is allowed and moves
    # b1 from a1's collection to a2's.
    a2 = A(data='a2')
    b1.a = a2
    assert b1 not in a1.bs
    assert b1 in a2.bs
def test_none_m2m_collection_assignment(self):
    """assigning a collection containing None is accepted in memory but
    rejected at flush time with a FlushError; the in-memory collection
    is left unchanged."""
    a, A, B, b, atob = (self.tables.a,
                        self.classes.A,
                        self.classes.B,
                        self.tables.b,
                        self.tables.atob)

    mapper(A, a, properties={
        'bs': relationship(B,
                           secondary=atob, backref="as")
    })
    mapper(B, b)

    s = Session()
    a1 = A(bs=[None])
    s.add(a1)
    eq_(a1.bs, [None])
    assert_raises_message(
        orm_exc.FlushError,
        "Can't flush None value found in collection A.bs",
        s.commit
    )
    # the None is still present after the failed commit
    eq_(a1.bs, [None])
def test_none_m2m_collection_append(self):
    """appending None to an m2m collection is accepted in memory but
    rejected at flush time with a FlushError; the in-memory collection
    is left unchanged."""
    a, A, B, b, atob = (self.tables.a,
                        self.classes.A,
                        self.classes.B,
                        self.tables.b,
                        self.tables.atob)

    mapper(A, a, properties={
        'bs': relationship(B,
                           secondary=atob, backref="as")
    })
    mapper(B, b)

    s = Session()
    a1 = A()
    a1.bs.append(None)
    s.add(a1)
    eq_(a1.bs, [None])
    assert_raises_message(
        orm_exc.FlushError,
        "Can't flush None value found in collection A.bs",
        s.commit
    )
    # the None is still present after the failed commit
    eq_(a1.bs, [None])
class O2MSelfReferentialDetelOrphanTest(fixtures.MappedTest):
    """delete-orphan cascade on a self-referential one-to-many.

    NOTE: "Detel" in the class name is a typo for "Delete"; kept as-is
    so test selection by name is unaffected.
    """

    @classmethod
    def define_tables(cls, metadata):
        # single adjacency-list table: each node may point to a parent node
        Table('node', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('parent_id', Integer, ForeignKey('node.id'))
              )

    @classmethod
    def setup_classes(cls):
        class Node(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        Node = cls.classes.Node
        node = cls.tables.node
        mapper(Node, node, properties={
            "children": relationship(
                Node,
                cascade="all, delete-orphan",
                # remote_side makes 'parent' the many-to-one direction
                backref=backref(
                    "parent",
                    remote_side=node.c.id
                )
            )
        })

    def test_self_referential_delete(self):
        """removing a child node orphans it, and the cascade also
        deletes that node's own children."""
        Node = self.classes.Node
        s = Session()

        n1, n2, n3, n4 = Node(), Node(), Node(), Node()
        n1.children = [n2, n3]
        n3.children = [n4]
        s.add_all([n1, n2, n3, n4])
        s.commit()
        eq_(s.query(Node).count(), 4)

        # orphaning n3 deletes n3 and, via the cascade, n4
        n1.children.remove(n3)
        s.commit()
        eq_(s.query(Node).count(), 2)
class NoBackrefCascadeTest(_fixtures.FixtureTest):
    """cascade_backrefs=False: an append/set performed via the backref
    side does not cascade the object into the session."""

    run_inserts = None

    @classmethod
    def setup_mappers(cls):
        addresses, Dingaling, User, dingalings, Address, users = (cls.tables.addresses,
                                                                  cls.classes.Dingaling,
                                                                  cls.classes.User,
                                                                  cls.tables.dingalings,
                                                                  cls.classes.Address,
                                                                  cls.tables.users)

        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address, backref='user',
                                      cascade_backrefs=False)
        })
        mapper(Dingaling, dingalings, properties={
            'address': relationship(Address, backref='dingalings',
                                    cascade_backrefs=False)
        })

    def test_o2m_basic(self):
        """setting a1.user (the backref side) does not pull a1 into the
        session."""
        User, Address = self.classes.User, self.classes.Address

        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)

        a1 = Address(email_address='a1')
        a1.user = u1
        assert a1 not in sess

    def test_o2m_commit_warns(self):
        """committing while a backref-associated object is outside the
        session emits a warning, and the object stays out."""
        User, Address = self.classes.User, self.classes.Address

        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)

        a1 = Address(email_address='a1')
        a1.user = u1

        assert_raises_message(
            sa_exc.SAWarning,
            "not in session",
            sess.commit
        )
        assert a1 not in sess

    def test_o2m_flag_on_backref(self):
        """appending via the forward side of the relationship still
        cascades into the session as usual."""
        Dingaling, Address = self.classes.Dingaling, self.classes.Address

        sess = Session()
        a1 = Address(email_address='a1')
        sess.add(a1)

        d1 = Dingaling()
        d1.address = a1
        assert d1 in a1.dingalings
        assert d1 in sess
        sess.commit()

    def test_m2o_basic(self):
        """appending to the backref collection does not pull the owner
        into the session."""
        Dingaling, Address = self.classes.Dingaling, self.classes.Address

        sess = Session()
        a1 = Address(email_address='a1')
        d1 = Dingaling()
        sess.add(d1)

        a1.dingalings.append(d1)
        assert a1 not in sess

    def test_m2o_flag_on_backref(self):
        """appending on the forward (collection) side cascades the owner
        into the session."""
        User, Address = self.classes.User, self.classes.Address

        sess = Session()
        a1 = Address(email_address='a1')
        sess.add(a1)

        u1 = User(name='u1')
        u1.addresses.append(a1)
        assert u1 in sess

    def test_m2o_commit_warns(self):
        """committing while the backref-owning object is outside the
        session emits a warning."""
        Dingaling, Address = self.classes.Dingaling, self.classes.Address

        sess = Session()
        a1 = Address(email_address='a1')
        d1 = Dingaling()
        sess.add(d1)

        a1.dingalings.append(d1)
        assert a1 not in sess

        assert_raises_message(
            sa_exc.SAWarning,
            "not in session",
            sess.commit
        )
class PendingOrphanTestSingleLevel(fixtures.MappedTest):
    """Pending entities that are orphans"""

    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
              Column('user_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(40)))
        # addresses.user_id is nullable -> a parentless Address is storable
        Table('addresses', metadata,
              Column('address_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey('users.user_id')),
              Column('email_address', String(40)))
        # orders.user_id is NOT NULL -> a parentless Order is not storable
        Table('orders', metadata,
              Column('order_id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey('users.user_id'), nullable=False),
              )

    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass

        class Address(cls.Comparable):
            pass

        class Order(cls.Comparable):
            pass

    def test_pending_standalone_orphan(self):
        """Standalone 'orphan' objects can now be persisted, if the underlying
        constraints of the database allow it.

        This now supports persisting of objects based on foreign key
        values alone.
        """
        users, orders, User, Address, Order, addresses = (self.tables.users,
                                                          self.tables.orders,
                                                          self.classes.User,
                                                          self.classes.Address,
                                                          self.classes.Order,
                                                          self.tables.addresses)

        mapper(Order, orders)
        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(Address, cascade="all,delete-orphan",
                                   backref="user"),
            orders=relationship(Order, cascade='all, delete-orphan')
        ))
        s = Session()

        # the standalone Address goes in, its foreign key
        # allows NULL
        a = Address()
        s.add(a)
        s.commit()

        # the standalone Order does not.
        o = Order()
        s.add(o)
        assert_raises(sa_exc.DBAPIError, s.commit)
        s.rollback()

        # can assign o.user_id by foreign key,
        # flush succeeds
        u = User()
        s.add(u)
        s.flush()
        o = Order(user_id=u.user_id)
        s.add(o)
        s.commit()
        assert o in s and o not in s.new

    def test_pending_collection_expunge(self):
        """Removing a pending item from a collection expunges it from
        the session."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(Address, cascade="all,delete-orphan",
                                   backref="user")
        ))
        s = create_session()

        u = User()
        s.add(u)
        s.flush()
        a = Address()

        u.addresses.append(a)
        assert a in s
        u.addresses.remove(a)
        assert a not in s

        s.delete(u)
        s.flush()
        assert a.address_id is None, "Error: address should not be persistent"

    def test_nonorphans_ok(self):
        """with "all,delete" (no delete-orphan), a removed child stays in
        the session and is persisted normally."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(Address, cascade="all,delete",
                                   backref="user")
        ))
        s = create_session()
        u = User(name='u1', addresses=[Address(email_address='ad1')])
        s.add(u)
        a1 = u.addresses[0]
        u.addresses.remove(a1)
        assert a1 in s
        s.flush()
        s.expunge_all()
        eq_(s.query(Address).all(), [Address(email_address='ad1')])
class PendingOrphanTestTwoLevel(fixtures.MappedTest):
    """test usages stated at

    http://article.gmane.org/gmane.comp.python.sqlalchemy.user/3085
    http://article.gmane.org/gmane.comp.python.sqlalchemy.user/3119
    """

    @classmethod
    def define_tables(cls, metadata):
        # order -> item -> attribute, each child FK is NOT NULL so an
        # orphan at any level cannot be persisted
        Table('order', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True)
              )
        Table('item', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('order_id', Integer, ForeignKey('order.id'), nullable=False)
              )
        Table('attribute', metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('item_id', Integer, ForeignKey('item.id'), nullable=False)
              )

    @classmethod
    def setup_classes(cls):
        class Order(cls.Comparable):
            pass

        class Item(cls.Comparable):
            pass

        class Attribute(cls.Comparable):
            pass

    def test_singlelevel_remove(self):
        """a pending Item removed from its Order's collection is
        expunged and the commit succeeds without it."""
        item, Order, order, Item = (self.tables.item,
                                    self.classes.Order,
                                    self.tables.order,
                                    self.classes.Item)
        mapper(Order, order, properties={
            'items': relationship(Item, cascade="all, delete-orphan")
        })
        mapper(Item, item)
        s = Session()
        o1 = Order()
        s.add(o1)

        i1 = Item()
        o1.items.append(i1)
        o1.items.remove(i1)
        s.commit()
        assert i1 not in o1.items

    def test_multilevel_remove(self):
        """expunging a pending orphan cascades down to its own pending
        children."""
        Item, Attribute, order, item, attribute, Order = (self.classes.Item,
                                                          self.classes.Attribute,
                                                          self.tables.order,
                                                          self.tables.item,
                                                          self.tables.attribute,
                                                          self.classes.Order)
        mapper(Order, order, properties={
            'items': relationship(Item, cascade="all, delete-orphan")
        })
        mapper(Item, item, properties={
            'attributes': relationship(Attribute, cascade="all, delete-orphan")
        })
        mapper(Attribute, attribute)
        s = Session()
        o1 = Order()
        s.add(o1)

        i1 = Item()
        a1 = Attribute()
        i1.attributes.append(a1)
        o1.items.append(i1)
        assert i1 in s
        assert a1 in s

        # i1 is an orphan so the operation
        # removes 'i1'. The operation
        # cascades down to 'a1'.
        o1.items.remove(i1)
        assert i1 not in s
        assert a1 not in s

        s.commit()
        assert o1 in s
        assert a1 not in s
        assert i1 not in s
        assert a1 not in o1.items
class DoubleParentO2MOrphanTest(fixtures.MappedTest):
    """Test orphan behavior on an entity that requires
    two parents via many-to-one (one-to-many collection.).
    """

    @classmethod
    def define_tables(cls, meta):
        Table('sales_reps', meta,
              Column('sales_rep_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50)))
        Table('accounts', meta,
              Column('account_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('balance', Integer))
        # a customer references both parents via nullable FKs
        Table('customers', meta,
              Column('customer_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50)),
              Column('sales_rep_id', Integer,
                     ForeignKey('sales_reps.sales_rep_id')),
              Column('account_id', Integer,
                     ForeignKey('accounts.account_id')))

    def _fixture(self, legacy_is_orphan, uselist):
        """Build a Customer attached to both an Account and a SalesRep,
        each via a delete-orphan relationship.

        legacy_is_orphan selects the Customer mapper's orphan criterion;
        the tests below show legacy=True requires ALL parents gone while
        legacy=False expunges when ANY parent is gone. uselist switches
        between collection (o2m) and scalar (o2o) forms.
        """
        sales_reps, customers, accounts = (self.tables.sales_reps,
                                           self.tables.customers,
                                           self.tables.accounts)

        class Customer(fixtures.ComparableEntity):
            pass

        class Account(fixtures.ComparableEntity):
            pass

        class SalesRep(fixtures.ComparableEntity):
            pass

        mapper(Customer, customers, legacy_is_orphan=legacy_is_orphan)
        mapper(Account, accounts, properties=dict(
            customers=relationship(Customer,
                                   cascade="all,delete-orphan",
                                   backref="account",
                                   uselist=uselist)))
        mapper(SalesRep, sales_reps, properties=dict(
            customers=relationship(Customer,
                                   cascade="all,delete-orphan",
                                   backref="sales_rep",
                                   uselist=uselist)))
        s = create_session()

        a = Account(balance=0)
        sr = SalesRep(name="John")
        s.add_all((a, sr))
        s.flush()

        c = Customer(name="Jane")
        if uselist:
            a.customers.append(c)
            sr.customers.append(c)
        else:
            a.customers = c
            sr.customers = c
        assert c in s
        return s, c, a, sr

    def test_double_parent_expunge_o2m_legacy(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        s, c, a, sr = self._fixture(True, True)

        a.customers.remove(c)
        assert c in s, "Should not expunge customer yet, still has one parent"

        sr.customers.remove(c)
        assert c not in s, \
            'Should expunge customer when both parents are gone'

    def test_double_parent_expunge_o2m_current(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        s, c, a, sr = self._fixture(False, True)

        a.customers.remove(c)
        assert c not in s, "Should expunge customer when either parent is gone"

        sr.customers.remove(c)
        assert c not in s, \
            'Should expunge customer when both parents are gone'

    def test_double_parent_expunge_o2o_legacy(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        s, c, a, sr = self._fixture(True, False)

        a.customers = None
        assert c in s, "Should not expunge customer yet, still has one parent"

        sr.customers = None
        assert c not in s, \
            'Should expunge customer when both parents are gone'

    def test_double_parent_expunge_o2o_current(self):
        """test the delete-orphan uow event for multiple delete-orphan
        parent relationships."""
        s, c, a, sr = self._fixture(False, False)

        a.customers = None
        assert c not in s, "Should expunge customer when either parent is gone"

        sr.customers = None
        assert c not in s, \
            'Should expunge customer when both parents are gone'
class DoubleParentM2OOrphanTest(fixtures.MappedTest):
    """Test orphan behavior on an entity that requires
    two parents via one-to-many (many-to-one reference to the orphan).
    """

    @classmethod
    def define_tables(cls, metadata):
        Table('addresses', metadata,
              Column('address_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('street', String(30)),
              )
        # both parent tables reference addresses with a NOT NULL FK
        Table('homes', metadata,
              Column('home_id', Integer, primary_key=True, key="id",
                     test_needs_autoincrement=True),
              Column('description', String(30)),
              Column('address_id', Integer, ForeignKey('addresses.address_id'),
                     nullable=False),
              )
        Table('businesses', metadata,
              Column('business_id', Integer, primary_key=True, key="id",
                     test_needs_autoincrement=True),
              Column('description', String(30), key="description"),
              Column('address_id', Integer, ForeignKey('addresses.address_id'),
                     nullable=False),
              )

    def test_non_orphan(self):
        """test that an entity can have two parent delete-orphan
        cascades, and persists normally."""
        homes, businesses, addresses = (self.tables.homes,
                                        self.tables.businesses,
                                        self.tables.addresses)

        class Address(fixtures.ComparableEntity):
            pass

        class Home(fixtures.ComparableEntity):
            pass

        class Business(fixtures.ComparableEntity):
            pass

        mapper(Address, addresses)
        mapper(Home, homes, properties={
            'address': relationship(Address, cascade='all,delete-orphan',
                                    single_parent=True)})
        mapper(Business, businesses, properties={
            'address': relationship(Address, cascade='all,delete-orphan',
                                    single_parent=True)})

        session = create_session()
        h1 = Home(description='home1', address=Address(street='address1'))
        b1 = Business(description='business1',
                      address=Address(street='address2'))
        session.add_all((h1, b1))
        session.flush()
        session.expunge_all()

        eq_(session.query(Home).get(h1.id), Home(description='home1',
                                                 address=Address(street='address1')))
        eq_(session.query(Business).get(b1.id),
            Business(description='business1',
                     address=Address(street='address2')))

    def test_orphan(self):
        """test that an entity can have two parent delete-orphan
        cascades, and is detected as an orphan when saved without a
        parent."""
        homes, businesses, addresses = (self.tables.homes,
                                        self.tables.businesses,
                                        self.tables.addresses)

        class Address(fixtures.ComparableEntity):
            pass

        class Home(fixtures.ComparableEntity):
            pass

        class Business(fixtures.ComparableEntity):
            pass

        mapper(Address, addresses)
        mapper(Home, homes, properties={
            'address': relationship(Address, cascade='all,delete-orphan',
                                    single_parent=True)})
        mapper(Business, businesses, properties={
            'address': relationship(Address, cascade='all,delete-orphan',
                                    single_parent=True)})
        session = create_session()
        a1 = Address()
        session.add(a1)
        # NOTE(review): no assertion follows — the flush completing
        # without error appears to be the expected outcome here; confirm
        # against the upstream test's intent (docstring suggests a raise
        # may once have been expected).
        session.flush()
class CollectionAssignmentOrphanTest(fixtures.MappedTest):
    """delete-orphan collections: loaded members are not considered
    orphans across flushes."""

    @classmethod
    def define_tables(cls, metadata):
        Table('table_a', metadata,
              Column('id', Integer,
                     primary_key=True, test_needs_autoincrement=True),
              Column('name', String(30)))
        Table('table_b', metadata,
              Column('id', Integer,
                     primary_key=True, test_needs_autoincrement=True),
              Column('name', String(30)),
              Column('a_id', Integer, ForeignKey('table_a.id')))

    def test_basic(self):
        table_b, table_a = self.tables.table_b, self.tables.table_a

        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, table_a, properties={
            'bs': relationship(B, cascade="all, delete-orphan")
        })
        mapper(B, table_b)

        a1 = A(name='a1', bs=[B(name='b1'), B(name='b2'), B(name='b3')])

        sess = create_session()
        sess.add(a1)
        sess.flush()
        sess.expunge_all()

        eq_(sess.query(A).get(a1.id),
            A(name='a1', bs=[B(name='b1'), B(name='b2'), B(name='b3')]))

        # a freshly loaded child must not be seen as an orphan
        a1 = sess.query(A).get(a1.id)
        assert not class_mapper(B)._is_orphan(
            attributes.instance_state(a1.bs[0]))

        # NOTE(review): 'foo' is not a mapped column on table_b
        # (id/name/a_id), so these writes presumably do not persist —
        # the re-query below expects the names unchanged. Confirm the
        # attribute name is intentional.
        a1.bs[0].foo = 'b2modified'
        a1.bs[1].foo = 'b3modified'
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(A).get(a1.id),
            A(name='a1', bs=[B(name='b1'), B(name='b2'), B(name='b3')]))
class OrphanCriterionTest(fixtures.MappedTest):
    """Exercise the criterion by which a Core object is considered an
    "orphan" of its delete-orphan parents.

    Two modes are compared: legacy_is_orphan=True (orphan only when ALL
    delete-orphan parents are gone) vs. the current behavior (orphan as
    soon as ANY parent is gone), across persistent/transient objects and
    with/without de-association events.
    """

    @classmethod
    def define_tables(self, metadata):
        # core may belong to two independent delete-orphan parents
        Table("core", metadata,
              Column("id", Integer,
                     primary_key=True, test_needs_autoincrement=True),
              Column("related_one_id", Integer, ForeignKey("related_one.id")),
              Column("related_two_id", Integer, ForeignKey("related_two.id"))
              )
        Table("related_one", metadata,
              Column("id", Integer,
                     primary_key=True, test_needs_autoincrement=True),
              )
        Table("related_two", metadata,
              Column("id", Integer,
                     primary_key=True, test_needs_autoincrement=True),
              )

    def _fixture(self, legacy_is_orphan, persistent,
                 r1_present, r2_present, detach_event=True):
        """Build and return a Core in the requested parent configuration.

        :param legacy_is_orphan: orphan criterion mode on the Core mapper.
        :param persistent: if True, flush c1 into a Session first.
        :param r1_present, r2_present: whether c1 ends up associated with
            a RelatedOne / RelatedTwo parent.
        :param detach_event: if True, attach c1 to both parents and then
            de-associate via attribute events (``c1.r1 = None``); if
            False, never attach the absent parent(s) at all.
        """
        class Core(object):
            pass

        class RelatedOne(object):
            def __init__(self, cores):
                self.cores = cores

        class RelatedTwo(object):
            def __init__(self, cores):
                self.cores = cores

        mapper(Core, self.tables.core, legacy_is_orphan=legacy_is_orphan)
        mapper(RelatedOne, self.tables.related_one, properties={
            'cores': relationship(Core, cascade="all, delete-orphan",
                                  backref="r1")
        })
        mapper(RelatedTwo, self.tables.related_two, properties={
            'cores': relationship(Core, cascade="all, delete-orphan",
                                  backref="r2")
        })
        c1 = Core()
        if detach_event:
            r1 = RelatedOne(cores=[c1])
            r2 = RelatedTwo(cores=[c1])
        else:
            if r1_present:
                r1 = RelatedOne(cores=[c1])
            if r2_present:
                r2 = RelatedTwo(cores=[c1])
        if persistent:
            s = Session()
            s.add(c1)
            s.flush()
        if detach_event:
            # de-associate the "absent" parents so the orphan criterion
            # sees an attribute-event-driven removal
            if not r1_present:
                c1.r1 = None
            if not r2_present:
                c1.r2 = None
        return c1

    def _assert_not_orphan(self, c1):
        mapper = object_mapper(c1)
        state = instance_state(c1)
        assert not mapper._is_orphan(state)

    def _assert_is_orphan(self, c1):
        mapper = object_mapper(c1)
        state = instance_state(c1)
        assert mapper._is_orphan(state)

    # test names encode: (leg|current) mode, (pers|transient) state,
    # and which parents are present (r1/notr1, r2/notr2).

    def test_leg_pers_r1_r2(self):
        c1 = self._fixture(True, True, True, True)
        self._assert_not_orphan(c1)

    def test_current_pers_r1_r2(self):
        c1 = self._fixture(False, True, True, True)
        self._assert_not_orphan(c1)

    def test_leg_pers_r1_notr2(self):
        c1 = self._fixture(True, True, True, False)
        self._assert_not_orphan(c1)

    def test_current_pers_r1_notr2(self):
        c1 = self._fixture(False, True, True, False)
        self._assert_is_orphan(c1)

    def test_leg_pers_notr1_notr2(self):
        c1 = self._fixture(True, True, False, False)
        self._assert_is_orphan(c1)

    def test_current_pers_notr1_notr2(self):
        # BUGFIX: previously passed r1_present=True
        # (self._fixture(False, True, True, False)), which duplicated
        # test_current_pers_r1_notr2 instead of testing the
        # "neither parent present" case the name describes.
        c1 = self._fixture(False, True, False, False)
        self._assert_is_orphan(c1)

    def test_leg_transient_r1_r2(self):
        c1 = self._fixture(True, False, True, True)
        self._assert_not_orphan(c1)

    def test_current_transient_r1_r2(self):
        c1 = self._fixture(False, False, True, True)
        self._assert_not_orphan(c1)

    def test_leg_transient_r1_notr2(self):
        c1 = self._fixture(True, False, True, False)
        self._assert_not_orphan(c1)

    def test_current_transient_r1_notr2(self):
        c1 = self._fixture(False, False, True, False)
        self._assert_is_orphan(c1)

    def test_leg_transient_notr1_notr2(self):
        c1 = self._fixture(True, False, False, False)
        self._assert_is_orphan(c1)

    def test_current_transient_notr1_notr2(self):
        c1 = self._fixture(False, False, False, False)
        self._assert_is_orphan(c1)

    def test_leg_transient_notr1_notr2_noevent(self):
        c1 = self._fixture(True, False, False, False, False)
        self._assert_is_orphan(c1)

    def test_current_transient_notr1_notr2_noevent(self):
        c1 = self._fixture(False, False, False, False, False)
        self._assert_is_orphan(c1)

    def test_leg_persistent_notr1_notr2_noevent(self):
        c1 = self._fixture(True, True, False, False, False)
        self._assert_not_orphan(c1)

    def test_current_persistent_notr1_notr2_noevent(self):
        c1 = self._fixture(False, True, False, False, False)
        self._assert_not_orphan(c1)
class O2MConflictTest(fixtures.MappedTest):
    """test that O2M dependency detects a change in parent, does the
    right thing, and updates the collection/attribute.
    """

    @classmethod
    def define_tables(cls, metadata):
        Table("parent", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True)
              )
        # child must always have a parent (NOT NULL FK)
        Table("child", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('parent_id', Integer, ForeignKey('parent.id'),
                     nullable=False)
              )

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass

        class Child(cls.Comparable):
            pass

    def _do_move_test(self, delete_old):
        """Attach c1 to p1, flush, then move c1 to p2 (optionally
        deleting p1 first) and verify c1 ends up owned by p2."""
        Parent, Child = self.classes.Parent, self.classes.Child

        sess = create_session()
        p1, p2, c1 = Parent(), Parent(), Child()
        # handles both the uselist=True (collection) and scalar mappings
        if Parent.child.property.uselist:
            p1.child.append(c1)
        else:
            p1.child = c1
        sess.add_all([p1, c1])
        sess.flush()

        if delete_old:
            sess.delete(p1)

        if Parent.child.property.uselist:
            p2.child.append(c1)
        else:
            p2.child = c1
        sess.add(p2)

        sess.flush()
        eq_(sess.query(Child).filter(Child.parent_id == p2.id).all(), [c1])

    def test_o2o_delete_old(self):
        """scalar (uselist=False) relationship, no cascade options."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent, properties={
            'child': relationship(Child, uselist=False)
        })
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2m_delete_old(self):
        """collection (uselist=True) relationship, no cascade options."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent, properties={
            'child': relationship(Child, uselist=True)
        })
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2o_backref_delete_old(self):
        """scalar relationship with a backref."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent, properties={
            'child': relationship(Child, uselist=False, backref='parent')
        })
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2o_delcascade_delete_old(self):
        """scalar relationship with "all, delete" cascade."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent, properties={
            'child': relationship(Child, uselist=False, cascade="all, delete")
        })
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2o_delorphan_delete_old(self):
        """scalar relationship with delete-orphan cascade."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent, properties={
            'child': relationship(Child, uselist=False,
                                  cascade="all, delete, delete-orphan")
        })
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2o_delorphan_backref_delete_old(self):
        """scalar relationship with delete-orphan cascade and a backref."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent, properties={
            'child': relationship(Child, uselist=False,
                                  cascade="all, delete, delete-orphan",
                                  backref='parent')
        })
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2o_backref_delorphan_delete_old(self):
        """relationship configured from the Child side; Parent.child is
        the uselist=False backref."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent)
        mapper(Child, child, properties={
            'parent': relationship(Parent, uselist=False, single_parent=True,
                                   backref=backref('child', uselist=False),
                                   cascade="all,delete,delete-orphan")
        })
        self._do_move_test(True)
        self._do_move_test(False)

    def test_o2m_backref_delorphan_delete_old(self):
        """relationship configured from the Child side; Parent.child is
        the uselist=True backref."""
        Child, Parent, parent, child = (self.classes.Child,
                                        self.classes.Parent,
                                        self.tables.parent,
                                        self.tables.child)
        mapper(Parent, parent)
        mapper(Child, child, properties={
            'parent': relationship(Parent, uselist=False, single_parent=True,
                                   backref=backref('child', uselist=True),
                                   cascade="all,delete,delete-orphan")
        })
        self._do_move_test(True)
        self._do_move_test(False)
class PartialFlushTest(fixtures.MappedTest):
    """test cascade behavior as it relates to object lists passed to flush()."""

    @classmethod
    def define_tables(cls, metadata):
        Table("base", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("descr", String(50))
              )
        # non-inheriting child of 'base'
        Table("noninh_child", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('base_id', Integer, ForeignKey('base.id'))
              )
        # joined-inheritance subclasses of 'base'
        Table("parent", metadata,
              Column("id", Integer, ForeignKey("base.id"), primary_key=True)
              )
        Table("inh_child", metadata,
              Column("id", Integer, ForeignKey("base.id"), primary_key=True),
              Column("parent_id", Integer, ForeignKey("parent.id"))
              )

    def test_o2m_m2o(self):
        """partial flush cascades o2m (parent pulls children in) but not
        m2o (children do not pull the parent in)."""
        base, noninh_child = self.tables.base, self.tables.noninh_child

        class Base(fixtures.ComparableEntity):
            pass

        class Child(fixtures.ComparableEntity):
            pass

        mapper(Base, base, properties={
            'children': relationship(Child, backref='parent')
        })
        mapper(Child, noninh_child)

        sess = create_session()
        c1, c2 = Child(), Child()
        b1 = Base(descr='b1', children=[c1, c2])
        sess.add(b1)
        assert c1 in sess.new
        assert c2 in sess.new
        sess.flush([b1])

        # c1, c2 get cascaded into the session on o2m.
        # not sure if this is how I like this
        # to work but that's how it works for now.
        assert c1 in sess and c1 not in sess.new
        assert c2 in sess and c2 not in sess.new
        assert b1 in sess and b1 not in sess.new

        sess = create_session()
        c1, c2 = Child(), Child()
        b1 = Base(descr='b1', children=[c1, c2])
        sess.add(b1)
        sess.flush([c1])
        # m2o, otoh, doesn't cascade up the other way.
        assert c1 in sess and c1 not in sess.new
        assert c2 in sess and c2 in sess.new
        assert b1 in sess and b1 in sess.new

        sess = create_session()
        c1, c2 = Child(), Child()
        b1 = Base(descr='b1', children=[c1, c2])
        sess.add(b1)
        sess.flush([c1, c2])
        # m2o, otoh, doesn't cascade up the other way.
        assert c1 in sess and c1 not in sess.new
        assert c2 in sess and c2 not in sess.new
        assert b1 in sess and b1 in sess.new

    def test_circular_sort(self):
        """test ticket 1306"""
        base, inh_child, parent = (self.tables.base,
                                   self.tables.inh_child,
                                   self.tables.parent)

        class Base(fixtures.ComparableEntity):
            pass

        class Parent(Base):
            pass

        class Child(Base):
            pass

        mapper(Base, base)
        mapper(Child, inh_child,
               inherits=Base,
               properties={'parent': relationship(
                   Parent,
                   backref='children',
                   primaryjoin=inh_child.c.parent_id == parent.c.id
               )}
               )
        mapper(Parent, parent, inherits=Base)

        sess = create_session()
        p1 = Parent()
        c1, c2, c3 = Child(), Child(), Child()
        p1.children = [c1, c2, c3]
        sess.add(p1)

        # partial flush of one child must not flush its siblings or the
        # parent, despite the circular dependency through inheritance
        sess.flush([c1])
        assert p1 in sess.new
        assert c1 not in sess.new
        assert c2 in sess.new
| mit |
vsoftco/qpp | unit_tests/lib/googletest-release-1.10.0/googlemock/scripts/upload_gmock.py | 770 | 2833 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Command-line flag prefix recognized by upload.py for CC recipients.
CC_FLAG = '--cc='
# Google Mock discussion group; always CC'd on uploaded patches.
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
    """Re-invoke upload.py with our arguments, ensuring GMOCK_GROUP is
    always present on the --cc list."""
    # upload.py is expected to live alongside this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    uploader = os.path.join(script_dir, 'upload.py')

    argv = [uploader]
    saw_cc_flag = False
    for flag in sys.argv[1:]:
        if not flag.startswith(CC_FLAG):
            argv.append(flag)
            continue
        # Merge GMOCK_GROUP into the user-supplied CC list if missing.
        saw_cc_flag = True
        recipients = [addr for addr in flag[len(CC_FLAG):].split(',') if addr]
        if GMOCK_GROUP not in recipients:
            recipients.append(GMOCK_GROUP)
        argv.append(CC_FLAG + ','.join(recipients))

    if not saw_cc_flag:
        argv.append(CC_FLAG + GMOCK_GROUP)

    # Replace the current process with upload.py.
    os.execv(uploader, argv)
# Script entry point: hand off to upload.py immediately.
if __name__ == '__main__':
    main()
| mit |
mahinthjoe/bedrock | py3env/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Decide between Logical Hebrew (windows-1255) and Visual Hebrew
    (ISO-8859-8) by accumulating final-letter evidence.

    This prober never identifies a charset by itself (it yields no
    confidence of its own); the SBCSGroupProber uses it as a "name prober"
    that combines the final-letter scores kept here with the confidences of
    the two windows-1255 model probers attached via set_model_probers().
    """

    # Membership is tested once per character in feed(), so use frozensets
    # (O(1) lookup) built once at class-creation time instead of scanning a
    # freshly-built list on every call.
    _FINAL_CHARS = frozenset([FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                              FINAL_TSADI])
    # The normal Tsadi is deliberately excluded: words like 'lechotet'
    # (to chat) contain an apostrophe after the tsadi, which
    # FilterWithoutEnglishLetters converts to a space, making a Non-Final
    # tsadi appear word-final even though it is not in the original text.
    # Pe and Kaf rarely show a related issue ('Pop', 'Winamp' and 'Mubarak'
    # legally end with a Non-Final Pe or Kaf), but such words are rare
    # enough that keeping these letters as Non-Final evidence is a net win.
    _NON_FINAL_CHARS = frozenset([NORMAL_KAF, NORMAL_MEM, NORMAL_NUN,
                                  NORMAL_PE])

    def __init__(self):
        CharSetProber.__init__(self)
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        """Clear the accumulated scores and the two-character look-behind."""
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer; initialized
        # to space to simulate a word delimiter at the start of the data.
        self._mPrev = ' '
        self._mBeforePrev = ' '

    def set_model_probers(self, logicalProber, visualProber):
        """Attach the two model probers (owned by the group prober) whose
        confidences are used as a tie-breaker in get_charset_name()."""
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """True if *c* is one of the five Hebrew final-form letters."""
        return wrap_ord(c) in self._FINAL_CHARS

    def is_non_final(self, c):
        """True if *c* is a normal-form letter whose appearance at the end
        of a word is evidence of visually-ordered text.

        See the comment on _NON_FINAL_CHARS for why the normal Tsadi is
        not included here.
        """
        return wrap_ord(c) in self._NON_FINAL_CHARS

    def feed(self, aBuf):
        """Accumulate final-letter evidence from *aBuf*.

        Cases scored (a "word" is delimited by spaces; 7-bit characters
        have already been replaced by spaces so boundaries are reliable):
          1) word longer than one letter ending with a Final letter
             -> +1 logical (text is laid out naturally);
          2) word longer than one letter ending with a Non-Final letter
             -> +1 visual (text is laid out backwards);
          3) word longer than one letter starting with a Final letter
             -> +1 visual (Final letters never begin a word).
        Final letters in the middle of words indicate neither layout and
        are ignored.  The scores are compared in get_charset_name().
        """
        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe

        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space so self._mPrev is
                    # not a 1 letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final][cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Forever detecting, till the end or until both model probers
        # return eNotMe (handled above).
        return eDetecting

    def get_charset_name(self):
        """Make the Logical-vs-Visual decision and return the charset name.

        Prefers a decisive final-letter score margin; falls back to the
        model probers' confidence difference, then to any non-zero
        final-letter margin, and finally defaults to Logical.
        """
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Final-letter margin is not dominant; try the model scores.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still inconclusive: any visual lean wins, otherwise default to
        # Logical (also covers a positive but sub-threshold margin).
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        """Remain eDetecting as long as either model prober is active."""
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
| mpl-2.0 |
google/contentbox | third_party/openid/test/test_verifydisco.py | 77 | 11216 | import unittest
from openid import message
from openid.test.support import OpenIDTestMixin
from openid.consumer import consumer
from openid.test.test_consumer import TestIdRes
from openid.consumer import discover
def const(result):
    """Return a function that ignores any arguments and just returns
    the specified *result*."""
    def _always(*_args, **_kwargs):
        return result
    return _always
class DiscoveryVerificationTest(OpenIDTestMixin, TestIdRes):
    """Tests for GenericConsumer._verifyDiscoveryResults: matching an
    indirect OpenID response message against pre-discovered endpoint data,
    and falling back to fresh discovery when they disagree."""

    def failUnlessProtocolError(self, prefix, callable, *args, **kwargs):
        """Assert that calling *callable* raises a consumer.ProtocolError
        whose first argument (the message) starts with *prefix*."""
        try:
            result = callable(*args, **kwargs)
        except consumer.ProtocolError, e:
            self.failUnless(
                e[0].startswith(prefix),
                'Expected message prefix %r, got message %r' % (prefix, e[0]))
        else:
            self.fail('Expected ProtocolError with prefix %r, '
                      'got successful return %r' % (prefix, result))

    def test_openID1NoLocalID(self):
        """OpenID 1 response with no openid.identity field is rejected."""
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.claimed_id = 'bogus'
        msg = message.Message.fromOpenIDArgs({})
        self.failUnlessProtocolError(
            'Missing required field openid.identity',
            self.consumer._verifyDiscoveryResults, msg, endpoint)
        self.failUnlessLogEmpty()

    def test_openID1NoEndpoint(self):
        """OpenID 1 verification without a stored endpoint is an error."""
        msg = message.Message.fromOpenIDArgs({'identity':'snakes on a plane'})
        self.failUnlessRaises(RuntimeError,
                              self.consumer._verifyDiscoveryResults, msg)
        self.failUnlessLogEmpty()

    def test_openID2NoOPEndpointArg(self):
        """OpenID 2 response missing op_endpoint raises KeyError."""
        msg = message.Message.fromOpenIDArgs({'ns':message.OPENID2_NS})
        self.failUnlessRaises(KeyError,
                              self.consumer._verifyDiscoveryResults, msg)
        self.failUnlessLogEmpty()

    def test_openID2LocalIDNoClaimed(self):
        """openid.identity without openid.claimed_id is a protocol error."""
        msg = message.Message.fromOpenIDArgs({'ns':message.OPENID2_NS,
                                              'op_endpoint':'Phone Home',
                                              'identity':'Jose Lius Borges'})
        self.failUnlessProtocolError(
            'openid.identity is present without',
            self.consumer._verifyDiscoveryResults, msg)
        self.failUnlessLogEmpty()

    def test_openID2NoLocalIDClaimed(self):
        """openid.claimed_id without openid.identity is a protocol error."""
        msg = message.Message.fromOpenIDArgs({'ns':message.OPENID2_NS,
                                              'op_endpoint':'Phone Home',
                                              'claimed_id':'Manuel Noriega'})
        self.failUnlessProtocolError(
            'openid.claimed_id is present without',
            self.consumer._verifyDiscoveryResults, msg)
        self.failUnlessLogEmpty()

    def test_openID2NoIdentifiers(self):
        """With neither identifier, the result is an OP-identifier endpoint
        carrying only the server URL."""
        op_endpoint = 'Phone Home'
        msg = message.Message.fromOpenIDArgs({'ns':message.OPENID2_NS,
                                              'op_endpoint':op_endpoint})
        result_endpoint = self.consumer._verifyDiscoveryResults(msg)
        self.failUnless(result_endpoint.isOPIdentifier())
        self.failUnlessEqual(op_endpoint, result_endpoint.server_url)
        self.failUnlessEqual(None, result_endpoint.claimed_id)
        self.failUnlessLogEmpty()

    def test_openID2NoEndpointDoesDisco(self):
        """With no pre-discovered endpoint, _discoverAndVerify is invoked
        and its result returned."""
        op_endpoint = 'Phone Home'
        sentinel = discover.OpenIDServiceEndpoint()
        sentinel.claimed_id = 'monkeysoft'
        # Monkey-patch discovery to return a recognizable sentinel.
        self.consumer._discoverAndVerify = const(sentinel)
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID2_NS,
             'identity':'sour grapes',
             'claimed_id':'monkeysoft',
             'op_endpoint':op_endpoint})
        result = self.consumer._verifyDiscoveryResults(msg)
        self.failUnlessEqual(sentinel, result)
        self.failUnlessLogMatches('No pre-discovered')

    def test_openID2MismatchedDoesDisco(self):
        """A stored endpoint that doesn't match the response triggers
        re-discovery instead of failing outright."""
        mismatched = discover.OpenIDServiceEndpoint()
        mismatched.identity = 'nothing special, but different'
        mismatched.local_id = 'green cheese'
        op_endpoint = 'Phone Home'
        sentinel = discover.OpenIDServiceEndpoint()
        sentinel.claimed_id = 'monkeysoft'
        self.consumer._discoverAndVerify = const(sentinel)
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID2_NS,
             'identity':'sour grapes',
             'claimed_id':'monkeysoft',
             'op_endpoint':op_endpoint})
        result = self.consumer._verifyDiscoveryResults(msg, mismatched)
        self.failUnlessEqual(sentinel, result)
        self.failUnlessLogMatches('Error attempting to use stored',
                                  'Attempting discovery')

    def test_openid2UsePreDiscovered(self):
        """A stored OpenID 2 endpoint that matches the response is used
        as-is (identity, no re-discovery)."""
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.local_id = 'my identity'
        endpoint.claimed_id = 'i am sam'
        endpoint.server_url = 'Phone Home'
        endpoint.type_uris = [discover.OPENID_2_0_TYPE]
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID2_NS,
             'identity':endpoint.local_id,
             'claimed_id':endpoint.claimed_id,
             'op_endpoint':endpoint.server_url})
        result = self.consumer._verifyDiscoveryResults(msg, endpoint)
        self.failUnless(result is endpoint)
        self.failUnlessLogEmpty()

    def test_openid2UsePreDiscoveredWrongType(self):
        """A stored endpoint of the wrong protocol type forces re-discovery;
        a ProtocolError from that discovery propagates to the caller."""
        text = "verify failed"
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.local_id = 'my identity'
        endpoint.claimed_id = 'i am sam'
        endpoint.server_url = 'Phone Home'
        endpoint.type_uris = [discover.OPENID_1_1_TYPE]
        def discoverAndVerify(claimed_id, to_match_endpoints):
            self.failUnlessEqual(claimed_id, endpoint.claimed_id)
            for to_match in to_match_endpoints:
                self.failUnlessEqual(claimed_id, to_match.claimed_id)
            raise consumer.ProtocolError(text)
        self.consumer._discoverAndVerify = discoverAndVerify
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID2_NS,
             'identity':endpoint.local_id,
             'claimed_id':endpoint.claimed_id,
             'op_endpoint':endpoint.server_url})
        try:
            r = self.consumer._verifyDiscoveryResults(msg, endpoint)
        except consumer.ProtocolError, e:
            # Should we make more ProtocolError subclasses?
            # NOTE(review): failUnless(str(e), text) only asserts str(e) is
            # truthy (text is just the failure message); probably intended
            # failUnlessEqual(str(e), text) -- confirm before changing.
            self.failUnless(str(e), text)
        else:
            self.fail("expected ProtocolError, %r returned." % (r,))
        self.failUnlessLogMatches('Error attempting to use stored',
                                  'Attempting discovery')

    def test_openid1UsePreDiscovered(self):
        """A stored OpenID 1 endpoint that matches the response is used
        as-is (identity, no re-discovery)."""
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.local_id = 'my identity'
        endpoint.claimed_id = 'i am sam'
        endpoint.server_url = 'Phone Home'
        endpoint.type_uris = [discover.OPENID_1_1_TYPE]
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID1_NS,
             'identity':endpoint.local_id})
        result = self.consumer._verifyDiscoveryResults(msg, endpoint)
        self.failUnless(result is endpoint)
        self.failUnlessLogEmpty()

    def test_openid1UsePreDiscoveredWrongType(self):
        """OpenID 1 response with a stored OpenID 2 endpoint forces
        re-discovery (here signalled via a marker exception)."""
        class VerifiedError(Exception): pass
        def discoverAndVerify(claimed_id, _to_match):
            raise VerifiedError
        self.consumer._discoverAndVerify = discoverAndVerify
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.local_id = 'my identity'
        endpoint.claimed_id = 'i am sam'
        endpoint.server_url = 'Phone Home'
        endpoint.type_uris = [discover.OPENID_2_0_TYPE]
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID1_NS,
             'identity':endpoint.local_id})
        self.failUnlessRaises(
            VerifiedError,
            self.consumer._verifyDiscoveryResults, msg, endpoint)
        self.failUnlessLogMatches('Error attempting to use stored',
                                  'Attempting discovery')

    def test_openid2Fragment(self):
        """A fragment on the response's claimed_id is preserved in the
        result while all other endpoint fields match the stored copy."""
        claimed_id = "http://unittest.invalid/"
        claimed_id_frag = claimed_id + "#fragment"
        endpoint = discover.OpenIDServiceEndpoint()
        endpoint.local_id = 'my identity'
        endpoint.claimed_id = claimed_id
        endpoint.server_url = 'Phone Home'
        endpoint.type_uris = [discover.OPENID_2_0_TYPE]
        msg = message.Message.fromOpenIDArgs(
            {'ns':message.OPENID2_NS,
             'identity':endpoint.local_id,
             'claimed_id': claimed_id_frag,
             'op_endpoint': endpoint.server_url})
        result = self.consumer._verifyDiscoveryResults(msg, endpoint)
        self.failUnlessEqual(result.local_id, endpoint.local_id)
        self.failUnlessEqual(result.server_url, endpoint.server_url)
        self.failUnlessEqual(result.type_uris, endpoint.type_uris)
        self.failUnlessEqual(result.claimed_id, claimed_id_frag)
        self.failUnlessLogEmpty()

    def test_openid1Fallback1_0(self):
        """OpenID 1 discovery verification tries the 1.1 type first, then
        falls back to matching the 1.0 type."""
        claimed_id = 'http://claimed.id/'
        endpoint = None
        resp_mesg = message.Message.fromOpenIDArgs({
            'ns': message.OPENID1_NS,
            'identity': claimed_id})
        # Pass the OpenID 1 claimed_id this way since we're passing
        # None for the endpoint.
        resp_mesg.setArg(message.BARE_NS, 'openid1_claimed_id', claimed_id)
        # We expect the OpenID 1 discovery verification to try
        # matching the discovered endpoint against the 1.1 type and
        # fall back to 1.0.
        expected_endpoint = discover.OpenIDServiceEndpoint()
        expected_endpoint.type_uris = [discover.OPENID_1_0_TYPE]
        expected_endpoint.local_id = None
        expected_endpoint.claimed_id = claimed_id
        discovered_services = [expected_endpoint]
        self.consumer._discover = lambda *args: ('unused', discovered_services)
        actual_endpoint = self.consumer._verifyDiscoveryResults(
            resp_mesg, endpoint)
        self.failUnless(actual_endpoint is expected_endpoint)
# XXX: test the implementation of _discoverAndVerify
class TestVerifyDiscoverySingle(TestIdRes):
    # XXX: more test the implementation of _verifyDiscoverySingle
    def test_endpointWithoutLocalID(self):
        """Discovery (e.g. Yadis with no LocalID tag) can produce an
        endpoint lacking a local_id; verifying it against a to-match
        endpoint that does carry one must still succeed."""
        server = "http://localhost:8000/openidserver"
        claimed = "http://localhost:8000/id/id-jo"
        discovered = discover.OpenIDServiceEndpoint()
        discovered.server_url = server
        discovered.claimed_id = claimed
        expected = discover.OpenIDServiceEndpoint()
        expected.server_url = server
        expected.claimed_id = claimed
        expected.local_id = claimed
        outcome = self.consumer._verifyDiscoverySingle(discovered, expected)
        # _verifyDiscoverySingle always returns None; it raises on failure.
        self.failUnlessEqual(outcome, None)
        self.failUnlessLogEmpty()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pygments/lexers/dylan.py | 25 | 10421 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dylan
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Dylan language.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Literal
__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
class DylanLexer(RegexLexer):
    """
    For the `Dylan <http://www.opendylan.org/>`_ language.
    .. versionadded:: 0.7
    """
    name = 'Dylan'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl', '*.intr']
    mimetypes = ['text/x-dylan']
    flags = re.IGNORECASE

    # Word classes re-flagged in get_tokens_unprocessed() after the generic
    # name rule matches (lookups are case-insensitive via .lower()).
    builtins = set((
        'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
        'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
        'each-subclass', 'exception', 'exclude', 'function', 'generic',
        'handler', 'inherited', 'inline', 'inline-only', 'instance',
        'interface', 'import', 'keyword', 'library', 'macro', 'method',
        'module', 'open', 'primary', 'required', 'sealed', 'sideways',
        'singleton', 'slot', 'thread', 'variable', 'virtual'))
    keywords = set((
        'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
        'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
        'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
        'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
        'while'))
    operators = set((
        '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
        '>', '>=', '&', '|'))
    functions = set((
        'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
        'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
        'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
        'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
        'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
        'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
        'condition-format-arguments', 'condition-format-string', 'conjoin',
        'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
        'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
        'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
        'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
        'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
        'function-arguments', 'function-return-values',
        'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
        'generic-function-methods', 'head', 'head-setter', 'identity',
        'initialize', 'instance?', 'integral?', 'intersection',
        'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
        'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
        'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
        'min', 'modulo', 'negative', 'negative?', 'next-method',
        'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
        'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
        'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
        'remove-duplicates', 'remove-duplicates!', 'remove-key!',
        'remove-method', 'replace-elements!', 'replace-subsequence!',
        'restart-query', 'return-allowed?', 'return-description',
        'return-query', 'reverse', 'reverse!', 'round', 'round/',
        'row-major-index', 'second', 'second-setter', 'shallow-copy',
        'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
        'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
        'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
        'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
        'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
        'vector', 'zero?'))

    # Raw string instead of double-escaping; the value is byte-identical to
    # the old '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+' spelling.
    valid_name = r'\\?[\w!&*<>|^$%@\-+~?/=]+'

    def get_tokens_unprocessed(self, text):
        """Re-flag generic Name tokens as builtins/keywords/functions/
        operators using the case-folded word-class sets above."""
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                lowercase_value = value.lower()
                if lowercase_value in self.builtins:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.keywords:
                    yield index, Keyword, value
                    continue
                if lowercase_value in self.functions:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.operators:
                    yield index, Operator, value
                    continue
            yield index, token, value

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),
            # single line comment
            (r'//.*?\n', Comment.Single),
            # lid header
            (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Text, String)),
            default('code') # no header match, switch to code
        ],
        'code': [
            # Whitespace
            (r'\s+', Text),
            # single line comment
            (r'//.*?\n', Comment.Single),
            # multi-line comment
            (r'/\*', Comment.Multiline, 'comment'),
            # strings and characters
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
            # binary integer
            (r'#b[01]+', Number.Bin),
            # octal integer
            (r'#o[0-7]+', Number.Oct),
            # floating point
            (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
            # decimal integer
            (r'[-+]?\d+', Number.Integer),
            # hex integer
            (r'#x[0-9a-f]+', Number.Hex),
            # Macro parameters
            (r'(\?' + valid_name + ')(:)'
             r'(token|name|variable|expression|body|case-body|\*)',
             bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
             bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'\?' + valid_name, Name.Tag),
            # Punctuation
            (r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
            # Most operators are picked up as names and then re-flagged.
            # This one isn't valid in a name though, so we pick it up now.
            (r':=', Operator),
            # Pick up #t / #f before we match other stuff with #.
            (r'#[tf]', Literal),
            # #"foo" style keywords
            (r'#"', String.Symbol, 'keyword'),
            # #rest, #key, #all-keys, etc.
            (r'#[a-z0-9-]+', Keyword),
            # required-init-keyword: style keywords.
            (valid_name + ':', Keyword),
            # class names
            (r'<' + valid_name + '>', Name.Class),
            # define variable forms.  Raw strings here: a plain '\*' is an
            # invalid escape sequence in modern Python (SyntaxWarning); the
            # pattern value is unchanged.
            (r'\*' + valid_name + r'\*', Name.Variable.Global),
            # define constant forms.
            (r'\$' + valid_name, Name.Constant),
            # everything else. We re-flag some of these in the method above.
            (valid_name, Name),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'keyword': [
            (r'"', String.Symbol, '#pop'),
            (r'[^\\"]+', String.Symbol), # all other characters
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ]
    }
class DylanLidLexer(RegexLexer):
    """
    For Dylan LID (Library Interchange Definition) files.
    .. versionadded:: 1.6
    """
    name = 'DylanLID'
    aliases = ['dylan-lid', 'lid']
    filenames = ['*.lid', '*.hdp']
    mimetypes = ['text/x-dylan-lid']
    # LID header keys are matched case-insensitively.
    flags = re.IGNORECASE
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),
            # single line comment
            (r'//.*?\n', Comment.Single),
            # lid header: "key: value"; the value may continue on
            # subsequent indented lines.
            (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Text, String)),
        ]
    }
class DylanConsoleLexer(Lexer):
    """
    For Dylan interactive console output like:
    .. sourcecode:: dylan-console
        ? let a = 1;
        => 1
        ? a
        => 1
    This is based on a copy of the RubyConsoleLexer.
    .. versionadded:: 1.6
    """
    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']

    # Raw strings: a plain '\?' is an invalid escape sequence in modern
    # Python (SyntaxWarning); the compiled patterns are unchanged.
    _line_re = re.compile(r'.*?\n')
    _prompt_re = re.compile(r'\?| ')

    def get_tokens_unprocessed(self, text):
        """Split prompt lines from plain output; prompted code is buffered
        and lexed by a DylanLexer, with the prompt tokens re-inserted at
        the right offsets via do_insertions()."""
        dylexer = DylanLexer(**self.options)

        curcode = ''      # accumulated code stripped of its prompts
        insertions = []   # (offset-in-curcode, prompt tokens) pairs
        for match in self._line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                # Prompt (or continuation) line: remember the prompt token
                # and buffer the rest as code.
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                # Output line: flush any buffered code first so token
                # order matches the original text.
                if curcode:
                    for item in do_insertions(insertions,
                                              dylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        # Flush code left over when the session ends on a prompt line.
        if curcode:
            for item in do_insertions(insertions,
                                      dylexer.get_tokens_unprocessed(curcode)):
                yield item
| gpl-3.0 |
nivekkagicom/uncrustify | scripts/make_options.py | 3 | 5064 | #!/usr/bin/env python
import argparse
import io
import os
import re
max_name_len = 60
re_name = re.compile(r'^[a-z][a-z0-9_]*$')
re_group = re.compile(r'//BEGIN')
re_option = re.compile(r'extern (Bounded)?Option<[^>]+>')
re_default = re.compile(r' *// *= *(.*)')
groups = []
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
script = os.path.relpath(__file__, root)
# =============================================================================
class Group(object):
    """A named option group, holding its options in declaration order."""

    def __init__(self, desc):
        """Create an empty group described by *desc*."""
        self.desc = desc
        self.options = []

    def append(self, option):
        """Add *option* at the end of this group."""
        self.options.append(option)
# =============================================================================
class Option(object):
    """A single configuration option parsed from options.h."""

    def __init__(self, name, dval, decl, desc):
        """Validate *name* and capture the declaration details.

        :param name: option identifier (lowercase, length-bounded)
        :param dval: default-value text from a '// =' trailer, or None
        :param decl: full 'extern (Bounded)Option<...>' declaration line
        :param desc: list of description lines for the option
        """
        if re_name.match(name) is None:
            raise ValueError('{!r} is not a valid option name'.format(name))
        if len(name) > max_name_len:
            raise ValueError(
                '{!r} (length={:d}) exceeds the maximum length {:d}'.format(
                    name, len(name), max_name_len))
        self.name = name
        self.dval = dval
        self.decl = decl[7:]  # drop the leading 'extern ' keyword
        self.desc = u'\n'.join(desc)

    def write_declaration(self, out):
        """Emit the C++ definition/initializer for this option to *out*."""
        pieces = [
            u'{} {} = {{\n'.format(self.decl, self.name),
            u' "{}",\n'.format(self.name),
            u' u8R"__(\n{}\n)__"'.format(self.desc),
        ]
        if self.dval is not None:
            pieces.append(u',\n {}'.format(self.dval))
        pieces.append(u'\n};\n\n')
        out.write(u''.join(pieces))
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# -----------------------------------------------------------------------------
def extract_default(decl):
    """Return the default value from a '// = value' trailer, or None."""
    match = re_default.match(decl)
    return match.group(1) if match else None
# -----------------------------------------------------------------------------
def write_banner(out, args):
    """Write the generated-file banner comment for options.cpp to *out*."""
    banner = (
        u'/**\n'
        u' * @file {out_name}\n'
        u' * Declaration and initializers for all options.\n'
        u' * Automatically generated by <code>{script}</code>\n'
        u' * from {in_name}.\n'
        u' */\n'
        u'\n')
    out.write(banner.format(in_name=os.path.basename(args.header),
                            out_name=os.path.basename(args.output),
                            script=script))
# -----------------------------------------------------------------------------
def write_declarations(out, args):
    """Write the full definition of every parsed option, in group order."""
    for grp in groups:
        for opt in grp.options:
            opt.write_declaration(out)
# -----------------------------------------------------------------------------
def write_registrations(out, args):
    """Write the begin_option_group/register_option calls for each group."""
    for grp in groups:
        out.write(u'\n begin_option_group(u8R"__(\n{}\n)__");\n\n'.format(
            grp.desc))
        for opt in grp.options:
            out.write(u' register_option(&options::{});\n'.format(opt.name))
# -----------------------------------------------------------------------------
def main():
    """Parse options.h and expand the template into options.cpp.

    Pass one scans the header, building the module-level ``groups`` list of
    option groups/options; pass two copies the template file to the output,
    replacing directive lines (##BANNER## etc.) with generated content.
    """
    parser = argparse.ArgumentParser(description='Generate options.cpp')
    parser.add_argument('output', type=str,
                        help='location of options.cpp to write')
    parser.add_argument('header', type=str,
                        help='location of options.h to read')
    parser.add_argument('template', type=str,
                        help='location of options.cpp.in to use as template')
    args = parser.parse_args()
    # Pass 1: scan the header line by line, collecting groups and options.
    with io.open(args.header, 'rt', encoding='utf-8') as f:
        desc = []
        for line in iter(f.readline, ''):
            line = line.strip()
            if re_group.match(line):
                # NOTE(review): line[8:] presumably strips a fixed-width
                # group marker prefix -- confirm against re_group's pattern.
                groups.append(Group(line[8:]))
            elif not len(line):
                # Blank line: the accumulated description no longer applies.
                desc = []
            elif line == '//':
                desc.append('')
            elif line.startswith('// '):
                desc.append(line[3:])
            elif re_option.match(line):
                # The option's declaration continues on the next line;
                # everything after the ';' may carry the default value.
                n, d = f.readline().split(';')
                o = Option(n, extract_default(d.strip()), line, desc)
                groups[-1].append(o)
    # Directive line -> generator function used to expand it.
    replacements = {
        u'##BANNER##': write_banner,
        u'##DECLARATIONS##': write_declarations,
        u'##REGISTRATIONS##': write_registrations,
    }
    # Pass 2: copy the template, expanding directive lines in place.
    with io.open(args.output, 'wt', encoding='utf-8') as out:
        with io.open(args.template, 'rt', encoding='utf-8') as t:
            for line in t:
                directive = line.strip()
                if directive in replacements:
                    replacements[directive](out, args)
                else:
                    out.write(line)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Script entry point: allow running this module directly.
if __name__ == '__main__':
    main()
| gpl-2.0 |
mehulsbhatt/nsscache | nss_cache/maps/shadow.py | 2 | 2297 | # Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a shadow map for nsscache.
ShadowMap: An implementation of NSS shadow maps based on the Map
class.
ShadowMapEntry: A shadow map entry based on the MapEntry class.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from nss_cache.maps import maps
class ShadowMap(maps.Map):
    """This class represents an NSS shadow map.

    Map data is stored as a list of MapEntry objects, see the abstract
    class Map.
    """

    def __init__(self, iterable=None):
        """Construct a ShadowMap object using optional iterable."""
        super(ShadowMap, self).__init__(iterable)

    def Add(self, entry):
        """Add a new object, verify it is a ShadowMapEntry object.

        Args:
          entry: the map entry to add.

        Returns:
          The result of the base class Add().

        Raises:
          TypeError: entry is not a ShadowMapEntry.
        """
        if not isinstance(entry, ShadowMapEntry):
            # Include the offending value so failures are diagnosable.
            raise TypeError('expected a ShadowMapEntry, got %r' % (entry,))
        return super(ShadowMap, self).Add(entry)
class ShadowMapEntry(maps.MapEntry):
    """This class represents NSS shadow map entries."""
    __slots__ = ('name', 'passwd', 'lstchg', 'min', 'max', 'warn', 'inact',
                 'expire', 'flag')
    _KEY = 'name'
    _ATTRS = ('name', 'passwd', 'lstchg', 'min', 'max', 'warn', 'inact',
              'expire', 'flag')

    def __init__(self, data=None):
        """Construct a ShadowMapEntry, setting reasonable defaults."""
        # Initialize every slot to None before handing off to the base
        # class, which may populate attributes from *data*.
        for attr in self._ATTRS:
            setattr(self, attr, None)

        super(ShadowMapEntry, self).__init__(data)

        # Seed data with defaults if needed: '!!' marks a locked password.
        if self.passwd is None:
            self.passwd = '!!'
| gpl-2.0 |
kenwang815/KodiPlugins | script.module.oceanktv/lib/youtube_dl/extractor/abcnews.py | 12 | 5011 | # coding: utf-8
from __future__ import unicode_literals
import calendar
import re
import time
from .amp import AMPIE
from .common import InfoExtractor
from ..compat import compat_urlparse
class AbcNewsVideoIE(AMPIE):
    """Extractor for individual abcnews.go.com video pages."""
    IE_NAME = 'abcnews:video'
    # Raw string so the \d regex escape reaches the re module unchanged;
    # non-raw invalid escape sequences are deprecated in Python 3.
    _VALID_URL = r'http://abcnews.go.com/[^/]+/video/(?P<display_id>[0-9a-z-]+)-(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://abcnews.go.com/ThisWeek/video/week-exclusive-irans-foreign-minister-zarif-20411932',
        'info_dict': {
            'id': '20411932',
            'ext': 'mp4',
            'display_id': 'week-exclusive-irans-foreign-minister-zarif',
            'title': '\'This Week\' Exclusive: Iran\'s Foreign Minister Zarif',
            'description': 'George Stephanopoulos goes one-on-one with Iranian Foreign Minister Dr. Javad Zarif.',
            'duration': 180,
            'thumbnail': 're:^https?://.*\.jpg$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://abcnews.go.com/2020/video/2020-husband-stands-teacher-jail-student-affairs-26119478',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch the AMP feed for the video id and return its info dict."""
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        video_id = mobj.group('id')

        info_dict = self._extract_feed_info(
            'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
        # The AMP feed doesn't know the page-level ids; fill them in.
        info_dict.update({
            'id': video_id,
            'display_id': display_id,
        })

        return info_dict
class AbcNewsIE(InfoExtractor):
    """Extractor for abcnews.go.com story pages (delegates the actual video
    to AbcNewsVideoIE via a url_transparent result)."""
    IE_NAME = 'abcnews'
    # Raw string so the \. and \d regex escapes reach the re module
    # unchanged; non-raw invalid escapes are deprecated in Python 3.
    _VALID_URL = r'https?://abcnews\.go\.com/(?:[^/]+/)+(?P<display_id>[0-9a-z-]+)/story\?id=(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://abcnews.go.com/Blotter/News/dramatic-video-rare-death-job-america/story?id=10498713#.UIhwosWHLjY',
        'info_dict': {
            'id': '10498713',
            'ext': 'flv',
            'display_id': 'dramatic-video-rare-death-job-america',
            'title': 'Occupational Hazards',
            'description': 'Nightline investigates the dangers that lurk at various jobs.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20100428',
            'timestamp': 1272412800,
        },
        'add_ie': ['AbcNewsVideo'],
    }, {
        'url': 'http://abcnews.go.com/Entertainment/justin-timberlake-performs-stop-feeling-eurovision-2016/story?id=39125818',
        'info_dict': {
            'id': '39125818',
            'ext': 'mp4',
            'display_id': 'justin-timberlake-performs-stop-feeling-eurovision-2016',
            'title': 'Justin Timberlake Drops Hints For Secret Single',
            'description': 'Lara Spencer reports the buzziest stories of the day in "GMA" Pop News.',
            'upload_date': '20160515',
            'timestamp': 1463329500,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
            # The embedded YouTube video is blocked due to copyright issues
            'playlist_items': '1',
        },
        'add_ie': ['AbcNewsVideo'],
    }, {
        'url': 'http://abcnews.go.com/Technology/exclusive-apple-ceo-tim-cook-iphone-cracking-software/story?id=37173343',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            r'window\.abcnvideo\.url\s*=\s*"([^"]+)"', webpage, 'video URL')
        full_video_url = compat_urlparse.urljoin(url, video_url)

        # Some stories additionally embed a YouTube player.
        youtube_url = self._html_search_regex(
            r'<iframe[^>]+src="(https://www\.youtube\.com/embed/[^"]+)"',
            webpage, 'YouTube URL', default=None)

        timestamp = None
        date_str = self._html_search_regex(
            r'<span[^>]+class="timestamp">([^<]+)</span>',
            webpage, 'timestamp', fatal=False)
        if date_str:
            tz_offset = 0
            if date_str.endswith(' ET'):  # Eastern Time
                # NOTE(review): a fixed -5h offset ignores daylight saving
                # time -- timestamps may be off by one hour part of the year.
                tz_offset = -5
                date_str = date_str[:-3]
            date_formats = ['%b. %d, %Y', '%b %d, %Y, %I:%M %p']
            for date_format in date_formats:
                try:
                    timestamp = calendar.timegm(
                        time.strptime(date_str.strip(), date_format))
                    # The formats are mutually exclusive; stop at the first
                    # one that parses instead of retrying the rest.
                    break
                except ValueError:
                    continue
            if timestamp is not None:
                timestamp -= tz_offset * 3600

        entry = {
            '_type': 'url_transparent',
            'ie_key': AbcNewsVideoIE.ie_key(),
            'url': full_video_url,
            'id': video_id,
            'display_id': display_id,
            'timestamp': timestamp,
        }

        if youtube_url:
            entries = [entry, self.url_result(youtube_url, 'Youtube')]
            return self.playlist_result(entries)

        return entry
| gpl-2.0 |
openstack/mistral | mistral/lang/v2/policies.py | 1 | 2902 | # Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.lang import types
from mistral.lang.v2 import base
from mistral.lang.v2 import retry_policy
class PoliciesSpec(base.BaseSpec):
    """Specification of task policies (retry, waits, timeout, etc.)."""

    # See http://json-schema.org
    _schema = {
        "type": "object",
        "properties": {
            "retry": retry_policy.RetrySpec.get_schema(),
            "wait-before": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "wait-after": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "timeout": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "pause-before": types.EXPRESSION_OR_BOOLEAN,
            "concurrency": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "fail-on": types.EXPRESSION_OR_BOOLEAN
        },
        "additionalProperties": False
    }

    @classmethod
    def get_schema(cls, includes=('definitions',)):
        return super(PoliciesSpec, cls).get_schema(includes)

    def __init__(self, data, validate):
        super(PoliciesSpec, self).__init__(data, validate)

        self._retry = self._spec_property('retry', retry_policy.RetrySpec)

        # Simple scalar policies: (data key, attribute name, default).
        for key, attr, default in (
                ('wait-before', '_wait_before', 0),
                ('wait-after', '_wait_after', 0),
                ('timeout', '_timeout', 0),
                ('pause-before', '_pause_before', False),
                ('concurrency', '_concurrency', 0),
                ('fail-on', '_fail_on', False)):
            setattr(self, attr, data.get(key, default))

    def validate_schema(self):
        super(PoliciesSpec, self).validate_schema()

        # Validate YAQL expressions.
        for key, default in (('wait-before', 0),
                             ('wait-after', 0),
                             ('timeout', 0),
                             ('pause-before', False),
                             ('concurrency', 0),
                             ('fail-on', False)):
            self.validate_expr(self._data.get(key, default))

    def get_retry(self):
        """Return the parsed retry policy spec (or None)."""
        return self._retry

    def get_wait_before(self):
        return self._wait_before

    def get_wait_after(self):
        return self._wait_after

    def get_timeout(self):
        return self._timeout

    def get_pause_before(self):
        return self._pause_before

    def get_concurrency(self):
        return self._concurrency

    def get_fail_on(self):
        return self._fail_on
| apache-2.0 |
VictorLowther/swift | swift/container/sync.py | 4 | 19917 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import ctime, time
from random import random, shuffle
from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.container import server as container_server
from swiftclient import ClientException, delete_object, put_object, \
quote
from swift.common.direct_client import direct_get_object
from swift.common.ring import Ring
from swift.common.db import ContainerBroker
from swift.common.utils import audit_location_generator, get_logger, \
hash_path, TRUE_VALUES, validate_sync_to, whataremyips
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
class _Iter2FileLikeObject(object):
"""
Returns an iterator's contents via :func:`read`, making it look like a file
object.
"""
def __init__(self, iterator):
self.iterator = iterator
self._chunk = ''
def read(self, size=-1):
"""
read([size]) -> read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was
requested may be returned, even if no size parameter was given.
"""
if size < 0:
chunk = self._chunk
self._chunk = ''
return chunk + ''.join(self.iterator)
chunk = self._chunk
self._chunk = ''
if chunk and len(chunk) <= size:
return chunk
try:
chunk += self.iterator.next()
except StopIteration:
pass
if len(chunk) <= size:
return chunk
self._chunk = chunk[size:]
return chunk[:size]
class ContainerSync(Daemon):
    """
    Daemon to sync syncable containers.

    This is done by scanning the local devices for container databases and
    checking for x-container-sync-to and x-container-sync-key metadata values.
    If they exist, newer rows since the last sync will trigger PUTs or DELETEs
    to the other container.

    .. note::

        Container sync will sync object POSTs only if the proxy server is set
        to use "object_post_as_copy = true" which is the default. So-called
        fast object posts, "object_post_as_copy = false" do not update the
        container listings and therefore can't be detected for
        synchronization.

    The actual syncing is slightly more complicated to make use of the three
    (or number-of-replicas) main nodes for a container without each trying to
    do the exact same work but also without missing work if one node happens
    to be down.

    Two sync points are kept per container database. All rows between the two
    sync points trigger updates. Any rows newer than both sync points cause
    updates depending on the node's position for the container (primary nodes
    do one third, etc. depending on the replica count of course). After a
    sync run, the first sync point is set to the newest ROWID known and the
    second sync point is set to newest ROWID for which all updates have been
    sent.

    An example may help. Assume replica count is 3 and perfectly matching
    ROWIDs starting at 1.

    First sync run, database has 6 rows:

        * SyncPoint1 starts as -1.
        * SyncPoint2 starts as -1.
        * No rows between points, so no "all updates" rows.
        * Six rows newer than SyncPoint1, so a third of the rows are sent
          by node 1, another third by node 2, remaining third by node 3.
        * SyncPoint1 is set as 6 (the newest ROWID known).
        * SyncPoint2 is left as -1 since no "all updates" rows were synced.

    Next sync run, database has 12 rows:

        * SyncPoint1 starts as 6.
        * SyncPoint2 starts as -1.
        * The rows between -1 and 6 all trigger updates (most of which
          should short-circuit on the remote end as having already been
          done).
        * Six more rows newer than SyncPoint1, so a third of the rows are
          sent by node 1, another third by node 2, remaining third by node
          3.
        * SyncPoint1 is set as 12 (the newest ROWID known).
        * SyncPoint2 is set as 6 (the newest "all updates" ROWID).

    In this way, under normal circumstances each node sends its share of
    updates each run and just sends a batch of older updates to ensure
    nothing was missed.

    :param conf: The dict of configuration values from the [container-sync]
                 section of the container-server.conf
    :param container_ring: If None, the <swift_dir>/container.ring.gz will be
                           loaded. This is overridden by unit tests.
    :param object_ring: If None, the <swift_dir>/object.ring.gz will be
                        loaded. This is overridden by unit tests.
    """

    def __init__(self, conf, container_ring=None, object_ring=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = \
            conf.get('mount_check', 'true').lower() in TRUE_VALUES
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving
        #: on to the next one. If a container sync hasn't finished in this
        #: time, it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: The list of hosts we're allowed to send syncs to.
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        #: Optional proxy to route sync requests through.
        self.proxy = conf.get('sync_proxy')
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers that didn't have sync turned on.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0
        #: Time of last stats report.
        self.reported = time()
        swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        #: swift.common.ring.Ring for locating objects.
        self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
        self._myips = whataremyips()
        self._myport = int(conf.get('bind_port', 6001))
        swift.common.db.DB_PREALLOCATION = \
            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES

    def run_forever(self):
        """
        Runs container sync scans until stopped.
        """
        # Random initial delay spreads the load across daemons that start
        # simultaneously.
        sleep(random() * self.interval)
        while True:
            begin = time()
            all_locs = audit_location_generator(self.devices,
                                                container_server.DATADIR,
                                                mount_check=self.mount_check,
                                                logger=self.logger)
            for path, device, partition in all_locs:
                self.container_sync(path)
                if time() - self.reported >= 3600:  # once an hour
                    self.report()
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(self.interval - elapsed)

    def run_once(self):
        """
        Runs a single container sync scan.
        """
        self.logger.info(_('Begin container sync "once" mode'))
        begin = time()
        all_locs = audit_location_generator(self.devices,
                                            container_server.DATADIR,
                                            mount_check=self.mount_check,
                                            logger=self.logger)
        for path, device, partition in all_locs:
            self.container_sync(path)
            if time() - self.reported >= 3600:  # once an hour
                self.report()
        self.report()
        elapsed = time() - begin
        self.logger.info(
            _('Container sync "once" mode completed: %.02fs'), elapsed)

    def report(self):
        """
        Writes a report of the stats to the logger and resets the stats for
        the next report.
        """
        self.logger.info(
            _('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
              'puts], %(skip)s skipped, %(fail)s failed'),
            {'time': ctime(self.reported),
             'sync': self.container_syncs,
             'delete': self.container_deletes,
             'put': self.container_puts,
             'skip': self.container_skips,
             'fail': self.container_failures})
        self.reported = time()
        self.container_syncs = 0
        self.container_deletes = 0
        self.container_puts = 0
        self.container_skips = 0
        self.container_failures = 0

    def container_sync(self, path):
        """
        Checks the given path for a container database, determines if syncing
        is turned on for that database and, if so, sends any updates to the
        other container.

        :param path: the path to a container db
        """
        try:
            if not path.endswith('.db'):
                return
            broker = ContainerBroker(path)
            info = broker.get_info()
            # x (the partition) is unused; only the node list matters here.
            x, nodes = self.container_ring.get_nodes(info['account'],
                                                     info['container'])
            # Find this server's position (ordinal) among the container's
            # primary nodes; bail out if we aren't one of them.
            for ordinal, node in enumerate(nodes):
                if node['ip'] in self._myips and node['port'] == self._myport:
                    break
            else:
                return
            if not broker.is_deleted():
                sync_to = None
                sync_key = None
                sync_point1 = info['x_container_sync_point1']
                sync_point2 = info['x_container_sync_point2']
                for key, (value, timestamp) in broker.metadata.iteritems():
                    if key.lower() == 'x-container-sync-to':
                        sync_to = value
                    elif key.lower() == 'x-container-sync-key':
                        sync_key = value
                # Both metadata values are required for syncing.
                if not sync_to or not sync_key:
                    self.container_skips += 1
                    self.logger.increment('skips')
                    return
                sync_to = sync_to.rstrip('/')
                err = validate_sync_to(sync_to, self.allowed_sync_hosts)
                if err:
                    self.logger.info(
                        _('ERROR %(db_file)s: %(validate_sync_to_err)s'),
                        {'db_file': broker.db_file,
                         'validate_sync_to_err': err})
                    self.container_failures += 1
                    self.logger.increment('failures')
                    return
                stop_at = time() + self.container_time
                # First pass: rows between the two sync points -- updates
                # that other nodes should have sent but may have missed.
                while time() < stop_at and sync_point2 < sync_point1:
                    rows = broker.get_items_since(sync_point2, 1)
                    if not rows:
                        break
                    row = rows[0]
                    if row['ROWID'] >= sync_point1:
                        break
                    key = hash_path(info['account'], info['container'],
                                    row['name'], raw_digest=True)
                    # This node will only intially sync out one third of the
                    # objects (if 3 replicas, 1/4 if 4, etc.). This section
                    # will attempt to sync previously skipped rows in case
                    # the other nodes didn't succeed.
                    if unpack_from('>I', key)[0] % \
                            len(nodes) != ordinal:
                        if not self.container_sync_row(row, sync_to, sync_key,
                                                       broker, info):
                            return
                    sync_point2 = row['ROWID']
                    broker.set_x_container_sync_points(None, sync_point2)
                # Second pass: rows newer than both sync points -- this
                # node's own share of the updates.
                while time() < stop_at:
                    rows = broker.get_items_since(sync_point1, 1)
                    if not rows:
                        break
                    row = rows[0]
                    key = hash_path(info['account'], info['container'],
                                    row['name'], raw_digest=True)
                    # This node will only intially sync out one third of the
                    # objects (if 3 replicas, 1/4 if 4, etc.). It'll come
                    # back around to the section above and attempt to sync
                    # previously skipped rows in case the other nodes didn't
                    # succeed.
                    if unpack_from('>I', key)[0] % \
                            len(nodes) == ordinal:
                        if not self.container_sync_row(row, sync_to, sync_key,
                                                       broker, info):
                            return
                    sync_point1 = row['ROWID']
                    broker.set_x_container_sync_points(sync_point1, None)
                self.container_syncs += 1
                self.logger.increment('syncs')
        except (Exception, Timeout), err:
            self.container_failures += 1
            self.logger.increment('failures')
            self.logger.exception(_('ERROR Syncing %s'), (broker.db_file))

    def container_sync_row(self, row, sync_to, sync_key, broker, info):
        """
        Sends the update the row indicates to the sync_to container.

        :param row: The updated row in the local database triggering the sync
                    update.
        :param sync_to: The URL to the remote container.
        :param sync_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param broker: The local container database broker.
        :param info: The get_info result from the local container database
                     broker.
        :returns: True on success
        """
        try:
            start_time = time()
            if row['deleted']:
                # Deleted row: mirror the delete remotely; a remote 404
                # means it's already gone, which counts as success.
                try:
                    delete_object(sync_to, name=row['name'],
                                  headers={'x-timestamp': row['created_at'],
                                           'x-container-sync-key': sync_key},
                                  proxy=self.proxy)
                except ClientException, err:
                    if err.http_status != HTTP_NOT_FOUND:
                        raise
                self.container_deletes += 1
                self.logger.increment('deletes')
                self.logger.timing_since('deletes.timing', start_time)
            else:
                # Live row: fetch the newest copy of the object from any
                # local-cluster node, then PUT it to the remote container.
                part, nodes = self.object_ring.get_nodes(
                    info['account'], info['container'],
                    row['name'])
                shuffle(nodes)
                exc = None
                looking_for_timestamp = float(row['created_at'])
                timestamp = -1
                headers = body = None
                for node in nodes:
                    try:
                        these_headers, this_body = direct_get_object(
                            node, part, info['account'], info['container'],
                            row['name'], resp_chunk_size=65536)
                        this_timestamp = float(these_headers['x-timestamp'])
                        if this_timestamp > timestamp:
                            timestamp = this_timestamp
                            headers = these_headers
                            body = this_body
                    except ClientException, err:
                        # If any errors are not 404, make sure we report the
                        # non-404 one. We don't want to mistakenly assume the
                        # object no longer exists just because one says so
                        # and the others errored for some other reason.
                        if not exc or exc.http_status == HTTP_NOT_FOUND:
                            exc = err
                if timestamp < looking_for_timestamp:
                    if exc:
                        raise exc
                    raise Exception(
                        _('Unknown exception trying to GET: '
                          '%(node)r %(account)r %(container)r %(object)r'),
                        {'node': node, 'part': part,
                         'account': info['account'],
                         'container': info['container'],
                         'object': row['name']})
                # Strip response-specific headers before replaying the PUT.
                for key in ('date', 'last-modified'):
                    if key in headers:
                        del headers[key]
                if 'etag' in headers:
                    headers['etag'] = headers['etag'].strip('"')
                headers['x-timestamp'] = row['created_at']
                headers['x-container-sync-key'] = sync_key
                put_object(sync_to, name=row['name'], headers=headers,
                           contents=_Iter2FileLikeObject(body),
                           proxy=self.proxy)
                self.container_puts += 1
                self.logger.increment('puts')
                self.logger.timing_since('puts.timing', start_time)
        except ClientException, err:
            if err.http_status == HTTP_UNAUTHORIZED:
                self.logger.info(
                    _('Unauth %(sync_from)r '
                      '=> %(sync_to)r'),
                    {'sync_from': '%s/%s' %
                        (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to})
            elif err.http_status == HTTP_NOT_FOUND:
                self.logger.info(
                    _('Not found %(sync_from)r '
                      '=> %(sync_to)r'),
                    {'sync_from': '%s/%s' %
                        (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to})
            else:
                self.logger.exception(
                    _('ERROR Syncing %(db_file)s %(row)s'),
                    {'db_file': broker.db_file, 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        except (Exception, Timeout), err:
            self.logger.exception(
                _('ERROR Syncing %(db_file)s %(row)s'),
                {'db_file': broker.db_file, 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        return True
| apache-2.0 |
krishardy/argentum-control | src/win32_setup.py | 2 | 1434 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Argentum Control GUI
Copyright (C) 2013 Isabella Stevens
Copyright (C) 2014 Michael Shiel
Copyright (C) 2015 Trent Waddington
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
# "os" is forced into the frozen build; tkinter is excluded to shrink it.
build_exe_options = {"packages": ["os"], "excludes": ["tkinter"]}

# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
    base = "Win32GUI"

# Freeze gui.py into a standalone executable with the Argentum icon.
setup(name="Argentum",
      version="0.0",
      description="Argentum GUI application!",
      options={"build_exe": build_exe_options},
      executables=[Executable("gui.py", base=base, icon='icon.ico')])
| gpl-3.0 |
pjreed/catkin_tools | docs/conf.py | 8 | 8654 | # -*- coding: utf-8 -*-
#
# catkin_tools documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 24 18:03:21 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this project; the sphinxcontrib entries are
# optional third-party extras, deliberately left commented out.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    #'sphinxcontrib.programoutput',
    #'sphinxcontrib.ansi',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files (the Sphinx default is UTF-8).
#source_encoding = 'utf-8-sig'

# The master toctree document; 'index' means index.rst is the docs root.
master_doc = 'index'
# General information about the project.
project = u'catkin_tools'
copyright = u'2014, Open Source Robotics Foundation, Inc.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): these look like placeholders -- presumably kept in sync with
# the package version by hand; confirm before a release.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# '_build' is Sphinx's default output directory.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# 'sphinx' is the colour scheme that ships with Sphinx itself.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'default' is the classic built-in Sphinx theme.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'catkin_toolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'catkin_tools.tex', u'catkin\\_tools Documentation',
u'William Woodall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'catkin_tools', u'catkin_tools Documentation',
[u'William Woodall'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'catkin_tools', u'catkin_tools Documentation',
u'William Woodall', 'catkin_tools', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# programoutput options
#programoutput_use_ansi = True
| apache-2.0 |
sjshao09/KaggleRH | gunja_split.py | 1 | 22414 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import model_selection, preprocessing
import xgboost as xgb
import datetime
# ----------------- Settings ----------------- #
EN_CROSSVALIDATION = True
######################### Train for Investment Data ############################
DEFAULT_TRAIN_ROUNDS = 409
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
macro = pd.read_csv('input/macro.csv', parse_dates=['timestamp'])
# ----------------- Data Cleaning ----------------- #
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
df = df[df.life_sq<300]
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 5, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>4) & (test_df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
# ----------------- Train for Investment Data ----------------- #
df = df[df.product_type=="Investment"]
#df = df[df.price_doc>1000000]
df = df[df.price_doc/df.full_sq <= np.exp(13.05)]
#df = df[df.price_doc/df.full_sq >= np.exp(9)]
test_df.product_type = "Investment"
y_train = df["price_doc"] * 0.97
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
x_test = x_all[num_train:]
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# ----------------- Cross Validation ----------------- #
xgb_params = {
'eta': 0.03,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 1,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent': 1,
'seed': 0
}
if EN_CROSSVALIDATION:
print "[INFO] Cross Validation..."
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20,
verbose_eval=20, show_stdv=True)
DEFAULT_TRAIN_ROUNDS = len(cv_output)
print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
evals=[(dtrain, 'train')], verbose_eval=50)
'''
# ----------------- Predicting Training Data for Ensemble ----------------- #
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df.loc[(df.full_sq<=1) & (df.life_sq<=1), 'full_sq'] = np.nan
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
df['product_type'] = "Investment"
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
y_train = df["price_doc"]
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
dtrain = xgb.DMatrix(x_train, y_train)
train_predict = model.predict(dtrain)
invest_train_predict_df = pd.DataFrame({'id': df.id, 'price_doc': train_predict})
# ----------------- Predicting Training Data for Ensemble -------end------- #
'''
y_predict = model.predict(dtest)
gunja_invest = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
print gunja_invest.head()
########################## Train for OwnerOccupier Data #########################
# ----------------- Settings ----------------- #
DEFAULT_TRAIN_ROUNDS = 704
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
macro = pd.read_csv('input/macro.csv', parse_dates=['timestamp'])
# ----------------- Data Cleaning ----------------- #
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
df = df[df.life_sq<300]
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 5, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>4) & (test_df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
# ----------------- Train for OwnerOccupier Data ----------------- #
df = df[df.product_type=="OwnerOccupier"]
df = df[df.price_doc/df.full_sq <= np.exp(13.15)]
df = df[df.price_doc/df.full_sq >= np.exp(10.4)]
test_df.product_type = "OwnerOccupier"
y_train = df["price_doc"]
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
x_test = x_all[num_train:]
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# ----------------- Cross Validation ----------------- #
xgb_params = {
'eta': 0.03,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 1,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent': 1,
'seed': 0
}
if EN_CROSSVALIDATION:
print "[INFO] Cross Validation..."
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=10,
verbose_eval=20, show_stdv=True)
DEFAULT_TRAIN_ROUNDS = len(cv_output)
print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
evals=[(dtrain, 'train')], verbose_eval=50)
'''
# ----------------- Predicting Training Data for Ensemble ----------------- #
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df.loc[(df.full_sq<=1) & (df.life_sq<=1), 'full_sq'] = np.nan
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
df.product_type = "OwnerOccupier"
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
y_train = df["price_doc"]
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
dtrain = xgb.DMatrix(x_train, y_train)
train_predict = model.predict(dtrain)
owner_train_predict_df = pd.DataFrame({'id': df.id, 'price_doc': train_predict})
# ----------------- Predicting Training Data for Ensemble -------end------- #
'''
y_predict = model.predict(dtest)
gunja_owner = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
print gunja_owner.head()
############################## Merge #############################
'''
# For Training Data Set
df = pd.read_csv('input/train.csv')
df['price_doc'] = invest_train_predict_df['price_doc']
df.loc[df.product_type=="OwnerOccupier", 'price_doc'] = owner_train_predict_df['price_doc']
train_predict = df[["id", "price_doc"]]
train_predict.to_csv('gunja_train.csv', index=False)
'''
# For Test Data Set
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
test_df['price_doc'] = gunja_invest['price_doc']
test_df.loc[test_df.product_type=="OwnerOccupier", 'price_doc'] = gunja_owner['price_doc']
gunja_output = test_df[["id", "price_doc"]]
print gunja_output.head()
gunja_output.to_csv('gunja_test.csv', index=False)
print "[INFO] Average Price =", gunja_output['price_doc'].mean()
| mit |
markovmodel/PyEMMA | pyemma/msm/tests/test_msm.py | 1 | 43096 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""Unit test for the MSM module
.. moduleauthor:: F. Noe <frank DOT noe AT fu-berlin DOT de>
.. moduleauthor:: B. Trendelkamp-Schroer <benjamin DOT trendelkamp-schroer AT fu-berlin DOT de>
"""
import unittest
import numpy as np
import scipy.sparse
import warnings
from msmtools.generation import generate_traj
from msmtools.estimation import count_matrix, largest_connected_set, largest_connected_submatrix, transition_matrix
from msmtools.analysis import stationary_distribution, timescales
from pyemma.util.numeric import assert_allclose
from pyemma.msm.tests.birth_death_chain import BirthDeathChain
from pyemma.msm import estimate_markov_model, MaximumLikelihoodMSM
class TestMSMSimple(unittest.TestCase):
    # Validates estimate_markov_model() against a reference estimation done
    # directly with msmtools on the same discrete trajectory.

    def setUp(self):
        """Store state of the rng"""
        self.state = np.random.mtrand.get_state()

        """Reseed the rng to enforce 'deterministic' behavior"""
        np.random.mtrand.seed(42)

        """Meta-stable birth-death chain"""
        # b sets the barrier height: crossing probabilities of 10**(-b) at
        # states 2 and 4 create two metastable wells in the 7-state chain.
        b = 2
        q = np.zeros(7)
        p = np.zeros(7)
        q[1:] = 0.5
        p[0:-1] = 0.5
        q[2] = 1.0 - 10 ** (-b)
        q[4] = 10 ** (-b)
        p[2] = 10 ** (-b)
        p[4] = 1.0 - 10 ** (-b)

        bdc = BirthDeathChain(q, p)
        P = bdc.transition_matrix()
        self.dtraj = generate_traj(P, 10000, start=0)
        self.tau = 1

        """Estimate MSM"""
        # Reference pipeline with msmtools: count matrix -> largest connected
        # submatrix -> reversible MLE transition matrix -> derived quantities.
        self.C_MSM = count_matrix(self.dtraj, self.tau, sliding=True)
        self.lcc_MSM = largest_connected_set(self.C_MSM)
        self.Ccc_MSM = largest_connected_submatrix(self.C_MSM, lcc=self.lcc_MSM)
        self.mle_rev_max_err = 1E-8
        self.P_MSM = transition_matrix(self.Ccc_MSM, reversible=True, maxerr=self.mle_rev_max_err)
        self.mu_MSM = stationary_distribution(self.P_MSM)
        self.k = 3
        self.ts = timescales(self.P_MSM, k=self.k, tau=self.tau)

    def tearDown(self):
        """Revert the state of the rng"""
        np.random.mtrand.set_state(self.state)

    def test_MSM(self):
        # Dense estimation must reproduce every reference quantity.
        msm = estimate_markov_model(self.dtraj, self.tau, maxerr=self.mle_rev_max_err)
        assert_allclose(self.dtraj, msm.discrete_trajectories_full[0])
        self.assertEqual(self.tau, msm.lagtime)
        assert_allclose(self.lcc_MSM, msm.largest_connected_set)
        self.assertTrue(np.allclose(self.Ccc_MSM.toarray(), msm.count_matrix_active))
        self.assertTrue(np.allclose(self.C_MSM.toarray(), msm.count_matrix_full))
        self.assertTrue(np.allclose(self.P_MSM.toarray(), msm.transition_matrix))
        assert_allclose(self.mu_MSM, msm.stationary_distribution)
        assert_allclose(self.ts[1:], msm.timescales(self.k - 1))

    def test_MSM_sparse(self):
        # Same checks with sparse=True; matrices come back as scipy sparse,
        # hence the extra .toarray() on the estimator side.
        msm = estimate_markov_model(self.dtraj, self.tau, sparse=True)
        assert_allclose(self.dtraj, msm.discrete_trajectories_full[0])
        self.assertEqual(self.tau, msm.lagtime)
        assert_allclose(self.lcc_MSM, msm.largest_connected_set)
        self.assertTrue(np.allclose(self.Ccc_MSM.toarray(), msm.count_matrix_active.toarray()))
        self.assertTrue(np.allclose(self.C_MSM.toarray(), msm.count_matrix_full.toarray()))
        self.assertTrue(np.allclose(self.P_MSM.toarray(), msm.transition_matrix.toarray()))
        assert_allclose(self.mu_MSM, msm.stationary_distribution)
        assert_allclose(self.ts[1:], msm.timescales(self.k - 1))

    def test_pcca_recompute(self):
        # Re-estimating with a new lag must invalidate the cached PCCA object.
        msm = estimate_markov_model(self.dtraj, self.tau)
        pcca1 = msm.pcca(2)
        msm.estimate(self.dtraj, lag=self.tau + 1)
        pcca2 = msm.pcca(2)
        assert pcca2 is not pcca1

    def test_rdl_recompute(self):
        """ test for issue 1301. Should recompute RDL decomposition in case of new transition matrix. """
        msm = estimate_markov_model(self.dtraj, self.tau)
        ev1 = msm.eigenvectors_left(2)
        msm.estimate(self.dtraj, lag=self.tau+1)
        ev2 = msm.eigenvectors_left(2)
        assert ev2 is not ev1
class TestMSMRevPi(unittest.TestCase):
    r"""Checks that the maximum-likelihood MSM estimator derives the correct
    active set when a fixed stationary distribution is supplied."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_valid_stationary_vector(self):
        observations = np.array([0, 0, 1, 0, 1, 2])
        # States 0 and 1 carry stationary weight, so they form the active set.
        well_formed_pi = np.array([0.1, 0.9, 0.0])
        msm = estimate_markov_model(observations, 1, statdist=well_formed_pi)
        self.assertTrue(np.all(msm.active_set == np.array([0, 1])))
        # A distribution vector of the wrong length must be rejected.
        truncated_pi = np.array([0.1, 0.9])
        with self.assertRaises(ValueError):
            msm = estimate_markov_model(observations, 1, statdist=truncated_pi)

    def test_valid_trajectory(self):
        target_pi = np.array([0.1, 0.0, 0.9])
        # A trajectory connecting states 0 and 2 is compatible with target_pi.
        compatible_traj = np.array([0, 2, 0, 2, 2, 0, 1, 1])
        msm = estimate_markov_model(compatible_traj, 1, statdist=target_pi)
        self.assertTrue(np.all(msm.active_set == np.array([0, 2])))
        # A trajectory confined to zero-weight state 1 must be rejected.
        incompatible_traj = np.array([1, 1, 1, 1, 1, 1, 1])
        with self.assertRaises(ValueError):
            msm = estimate_markov_model(incompatible_traj, 1, statdist=target_pi)
class TestMSMDoubleWell(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Build one shared set of estimators for all tests: the bundled
        # double-well discrete trajectory estimated at lag 10 in six
        # configurations (reversible / reversible-with-fixed-pi /
        # nonreversible, each dense and sparse).
        import pyemma.datasets
        cls.dtraj = pyemma.datasets.load_2well_discrete().dtraj_T100K_dt10
        # Empirical state histogram, normalized, used as the fixed
        # stationary distribution for the constrained estimators.
        nu = 1.*np.bincount(cls.dtraj)
        cls.statdist = nu/nu.sum()

        cls.tau = 10
        maxerr = 1e-12
        cls.msmrev = estimate_markov_model(cls.dtraj, cls.tau ,maxerr=maxerr)
        cls.msmrevpi = estimate_markov_model(cls.dtraj, cls.tau,maxerr=maxerr,
                                             statdist=cls.statdist)
        cls.msm = estimate_markov_model(cls.dtraj, cls.tau, reversible=False, maxerr=maxerr)

        """Sparse"""
        cls.msmrev_sparse = estimate_markov_model(cls.dtraj, cls.tau, sparse=True, maxerr=maxerr)
        cls.msmrevpi_sparse = estimate_markov_model(cls.dtraj, cls.tau,maxerr=maxerr,
                                                    statdist=cls.statdist,
                                                    sparse=True)
        cls.msm_sparse = estimate_markov_model(cls.dtraj, cls.tau, reversible=False, sparse=True, maxerr=maxerr)
# ---------------------------------
# SCORE
# ---------------------------------
def _score(self, msm):
# check estimator args are not overwritten, if default arguments are used.
old_score_k = msm.score_k
old_score_method = msm.score_method
dtrajs_test = self.dtraj[80000:]
msm.score(dtrajs_test)
assert msm.score_k == old_score_k
assert msm.score_method == old_score_method
s1 = msm.score(dtrajs_test, score_method='VAMP1', score_k=2)
assert msm.score_k == 2
assert msm.score_method == 'VAMP1'
assert 1.0 <= s1 <= 2.0
s2 = msm.score(dtrajs_test, score_method='VAMP2', score_k=2)
assert 1.0 <= s2 <= 2.0
assert msm.score_k == 2
assert msm.score_method == 'VAMP2'
# se = msm.score(dtrajs_test, score_method='VAMPE', score_k=2)
# se_inf = msm.score(dtrajs_test, score_method='VAMPE', score_k=None)
def test_score(self):
self._score(self.msmrev)
self._score(self.msmrevpi)
self._score(self.msm)
self._score(self.msmrev_sparse)
self._score(self.msmrevpi_sparse)
self._score(self.msm_sparse)
def _score_cv(self, estimator):
s1 = estimator.score_cv(self.dtraj, n=5, score_method='VAMP1', score_k=2).mean()
assert 1.0 <= s1 <= 2.0
s2 = estimator.score_cv(self.dtraj, n=5, score_method='VAMP2', score_k=2).mean()
assert 1.0 <= s2 <= 2.0
se = estimator.score_cv(self.dtraj, n=5, score_method='VAMPE', score_k=2).mean()
se_inf = estimator.score_cv(self.dtraj, n=5, score_method='VAMPE', score_k=None).mean()
#TODO: what is this?
def test_score_cv(self):
self._score_cv(MaximumLikelihoodMSM(lag=10, reversible=True))
self._score_cv(MaximumLikelihoodMSM(lag=10, reversible=True, statdist_constraint=self.statdist))
self._score_cv(MaximumLikelihoodMSM(lag=10, reversible=False))
self._score_cv(MaximumLikelihoodMSM(lag=10, reversible=True, sparse=True))
self._score_cv(MaximumLikelihoodMSM(lag=10, reversible=True, statdist_constraint=self.statdist, sparse=True))
self._score_cv(MaximumLikelihoodMSM(lag=10, reversible=False, sparse=True))
# ---------------------------------
# BASIC PROPERTIES
# ---------------------------------
    def test_reversible(self):
        """``is_reversible`` must reflect how each model was estimated.

        NOTE: the original section comments were swapped (the reversible
        models were labelled NONREVERSIBLE and vice versa); fixed here.
        """
        # REVERSIBLE estimates (dense and sparse, with and without statdist)
        assert self.msmrev.is_reversible
        assert self.msmrevpi.is_reversible
        assert self.msmrev_sparse.is_reversible
        assert self.msmrevpi_sparse.is_reversible
        # NONREVERSIBLE estimates
        assert not self.msm.is_reversible
        assert not self.msm_sparse.is_reversible
def _sparse(self, msm):
assert (msm.is_sparse)
def test_sparse(self):
self._sparse(self.msmrev_sparse)
self._sparse(self.msmrevpi_sparse)
self._sparse(self.msm_sparse)
def _lagtime(self, msm):
assert (msm.lagtime == self.tau)
def test_lagtime(self):
self._lagtime(self.msmrev)
self._lagtime(self.msmrevpi)
self._lagtime(self.msm)
self._lagtime(self.msmrev_sparse)
self._lagtime(self.msmrevpi_sparse)
self._lagtime(self.msm_sparse)
def _active_set(self, msm):
# should always be <= full set
self.assertLessEqual(len(msm.active_set), self.msm.nstates_full)
# should be length of nstates
self.assertEqual(len(msm.active_set), self.msm.nstates)
def test_active_set(self):
self._active_set(self.msmrev)
self._active_set(self.msmrevpi)
self._active_set(self.msm)
self._active_set(self.msmrev_sparse)
self._active_set(self.msmrevpi_sparse)
self._active_set(self.msm_sparse)
def _largest_connected_set(self, msm):
lcs = msm.largest_connected_set
# identical to first connected set
assert (np.all(lcs == msm.connected_sets[0]))
# LARGEST: identical to active set
assert (np.all(lcs == msm.active_set))
def test_largest_connected_set(self):
self._largest_connected_set(self.msmrev)
self._largest_connected_set(self.msmrevpi)
self._largest_connected_set(self.msm)
self._largest_connected_set(self.msmrev_sparse)
self._largest_connected_set(self.msmrevpi_sparse)
self._largest_connected_set(self.msm_sparse)
def _nstates(self, msm):
# should always be <= full
assert (msm.nstates <= msm.nstates_full)
# THIS DATASET:
assert (msm.nstates == 66)
def test_nstates(self):
self._nstates(self.msmrev)
self._nstates(self.msmrevpi)
self._nstates(self.msm)
self._nstates(self.msmrev_sparse)
self._nstates(self.msmrevpi_sparse)
self._nstates(self.msm_sparse)
def _connected_sets(self, msm):
cs = msm.connected_sets
assert (len(cs) >= 1)
# MODE LARGEST:
assert (np.all(cs[0] == msm.active_set))
def test_connected_sets(self):
self._connected_sets(self.msmrev)
self._connected_sets(self.msmrevpi)
self._connected_sets(self.msm)
self._connected_sets(self.msmrev_sparse)
self._connected_sets(self.msmrevpi_sparse)
self._connected_sets(self.msm_sparse)
def _connectivity(self, msm):
# HERE:
assert (msm.connectivity == 'largest')
def test_connectivity(self):
self._connectivity(self.msmrev)
self._connectivity(self.msmrevpi)
self._connectivity(self.msm)
self._connectivity(self.msmrev_sparse)
self._connectivity(self.msmrevpi_sparse)
self._connectivity(self.msm_sparse)
def _count_matrix_active(self, msm):
C = msm.count_matrix_active
assert (np.all(C.shape == (msm.nstates, msm.nstates)))
def test_count_matrix_active(self):
self._count_matrix_active(self.msmrev)
self._count_matrix_active(self.msmrevpi)
self._count_matrix_active(self.msm)
self._count_matrix_active(self.msmrev_sparse)
self._count_matrix_active(self.msmrevpi_sparse)
self._count_matrix_active(self.msm_sparse)
def _count_matrix_full(self, msm):
C = msm.count_matrix_full
assert (np.all(C.shape == (msm.nstates_full, msm.nstates_full)))
def test_count_matrix_full(self):
self._count_matrix_full(self.msmrev)
self._count_matrix_full(self.msmrevpi)
self._count_matrix_full(self.msm)
self._count_matrix_full(self.msmrev_sparse)
self._count_matrix_full(self.msmrevpi_sparse)
self._count_matrix_full(self.msm_sparse)
def _discrete_trajectories_full(self, msm):
assert (np.all(self.dtraj == msm.discrete_trajectories_full[0]))
def test_discrete_trajectories_full(self):
self._discrete_trajectories_full(self.msmrev)
self._discrete_trajectories_full(self.msmrevpi)
self._discrete_trajectories_full(self.msm)
self._discrete_trajectories_full(self.msmrev_sparse)
self._discrete_trajectories_full(self.msmrevpi_sparse)
self._discrete_trajectories_full(self.msm_sparse)
def _discrete_trajectories_active(self, msm):
dta = msm.discrete_trajectories_active
# HERE
assert (len(dta) == 1)
# HERE: states are shifted down from the beginning, because early states are missing
assert (dta[0][0] < self.dtraj[0])
def test_discrete_trajectories_active(self):
self._discrete_trajectories_active(self.msmrev)
self._discrete_trajectories_active(self.msmrevpi)
self._discrete_trajectories_active(self.msm)
self._discrete_trajectories_active(self.msmrev_sparse)
self._discrete_trajectories_active(self.msmrevpi_sparse)
self._discrete_trajectories_active(self.msm_sparse)
def _timestep(self, msm):
assert (msm.timestep_model.startswith('1'))
assert (msm.timestep_model.endswith('step'))
def test_timestep(self):
self._timestep(self.msmrev)
self._timestep(self.msmrevpi)
self._timestep(self.msm)
self._timestep(self.msmrev_sparse)
self._timestep(self.msmrevpi_sparse)
self._timestep(self.msm_sparse)
def _dt_model(self, msm):
from pyemma.util.units import TimeUnit
tu = TimeUnit("1 step").get_scaled(self.msm.lag)
self.assertEqual(msm.dt_model, tu)
def test_dt_model(self):
self._dt_model(self.msmrev)
self._dt_model(self.msmrevpi)
self._dt_model(self.msm)
self._dt_model(self.msmrev_sparse)
self._dt_model(self.msmrevpi_sparse)
self._dt_model(self.msm_sparse)
    def _transition_matrix(self, msm):
        """Validate the estimated transition matrix P of *msm*: type, shape,
        row-stochasticity, connectivity, and (if applicable) reversibility."""
        P = msm.transition_matrix
        # should be ndarray by default
        # assert (isinstance(P, np.ndarray))
        assert (isinstance(P, np.ndarray) or isinstance(P, scipy.sparse.csr_matrix))
        # shape
        assert (np.all(P.shape == (msm.nstates, msm.nstates)))
        # test transition matrix properties
        import msmtools.analysis as msmana
        assert (msmana.is_transition_matrix(P))
        assert (msmana.is_connected(P))
        # REVERSIBLE: detailed balance must hold for reversible estimates
        if msm.is_reversible:
            assert (msmana.is_reversible(P))
def test_transition_matrix(self):
self._transition_matrix(self.msmrev)
self._transition_matrix(self.msmrev)
self._transition_matrix(self.msm)
self._transition_matrix(self.msmrev_sparse)
self._transition_matrix(self.msmrevpi_sparse)
self._transition_matrix(self.msm_sparse)
# ---------------------------------
# SIMPLE STATISTICS
# ---------------------------------
def _active_count_fraction(self, msm):
# should always be a fraction
assert (0.0 <= msm.active_count_fraction <= 1.0)
# special case for this data set:
assert (msm.active_count_fraction == 1.0)
def test_active_count_fraction(self):
self._active_count_fraction(self.msmrev)
self._active_count_fraction(self.msmrevpi)
self._active_count_fraction(self.msm)
self._active_count_fraction(self.msmrev_sparse)
self._active_count_fraction(self.msmrevpi_sparse)
self._active_count_fraction(self.msm_sparse)
def _active_state_fraction(self, msm):
# should always be a fraction
assert (0.0 <= msm.active_state_fraction <= 1.0)
def test_active_state_fraction(self):
# should always be a fraction
self._active_state_fraction(self.msmrev)
self._active_state_fraction(self.msmrevpi)
self._active_state_fraction(self.msm)
self._active_state_fraction(self.msmrev_sparse)
self._active_state_fraction(self.msmrevpi_sparse)
self._active_state_fraction(self.msm_sparse)
def _effective_count_matrix(self, msm):
Ceff = msm.effective_count_matrix
assert (np.all(Ceff.shape == (msm.nstates, msm.nstates)))
def test_effective_count_matrix(self):
self._effective_count_matrix(self.msmrev)
self._effective_count_matrix(self.msmrevpi)
self._effective_count_matrix(self.msm)
self._effective_count_matrix(self.msmrev_sparse)
self._effective_count_matrix(self.msmrevpi_sparse)
self._effective_count_matrix(self.msm_sparse)
# ---------------------------------
# EIGENVALUES, EIGENVECTORS
# ---------------------------------
def _statdist(self, msm):
mu = msm.stationary_distribution
# should strictly positive (irreversibility)
assert (np.all(mu > 0))
# should sum to one
assert (np.abs(np.sum(mu) - 1.0) < 1e-10)
def test_statdist(self):
self._statdist(self.msmrev)
self._statdist(self.msmrevpi)
self._statdist(self.msm)
self._statdist(self.msmrev_sparse)
self._statdist(self.msmrevpi_sparse)
self._statdist(self.msm_sparse)
def _eigenvalues(self, msm):
if not msm.is_sparse:
ev = msm.eigenvalues()
else:
k = 4
ev = msm.eigenvalues(k)
# stochasticity
assert (np.max(np.abs(ev)) <= 1 + 1e-12)
# irreducible
assert (np.max(np.abs(ev[1:])) < 1)
# ordered?
evabs = np.abs(ev)
for i in range(0, len(evabs) - 1):
assert (evabs[i] >= evabs[i + 1])
# REVERSIBLE:
if msm.is_reversible:
assert (np.all(np.isreal(ev)))
def test_eigenvalues(self):
self._eigenvalues(self.msmrev)
self._eigenvalues(self.msmrevpi)
self._eigenvalues(self.msm)
self._eigenvalues(self.msmrev_sparse)
self._eigenvalues(self.msmrevpi_sparse)
self._eigenvalues(self.msm_sparse)
def _eigenvectors_left(self, msm):
if not msm.is_sparse:
L = msm.eigenvectors_left()
k = msm.nstates
else:
k = 4
L = msm.eigenvectors_left(k)
# shape should be right
assert (np.all(L.shape == (k, msm.nstates)))
# first one should be identical to stat.dist
l1 = L[0, :]
err = msm.stationary_distribution - l1
assert (np.max(np.abs(err)) < 1e-10)
# sums should be 1, 0, 0, ...
assert (np.allclose(np.sum(L[1:, :], axis=1), np.zeros(k - 1)))
# REVERSIBLE:
if msm.is_reversible:
assert (np.all(np.isreal(L)))
def test_eigenvectors_left(self):
self._eigenvectors_left(self.msmrev)
self._eigenvectors_left(self.msmrevpi)
self._eigenvectors_left(self.msm)
self._eigenvectors_left(self.msmrev_sparse)
self._eigenvectors_left(self.msmrevpi_sparse)
self._eigenvectors_left(self.msm_sparse)
def _eigenvectors_right(self, msm):
if not msm.is_sparse:
R = msm.eigenvectors_right()
k = msm.nstates
else:
k = 4
R = msm.eigenvectors_right(k)
# shape should be right
assert (np.all(R.shape == (msm.nstates, k)))
# should be all ones
r1 = R[:, 0]
assert (np.allclose(r1, np.ones(msm.nstates)))
# REVERSIBLE:
if msm.is_reversible:
assert (np.all(np.isreal(R)))
def test_eigenvectors_right(self):
self._eigenvectors_right(self.msmrev)
self._eigenvectors_right(self.msmrevpi)
self._eigenvectors_right(self.msm)
self._eigenvectors_right(self.msmrev_sparse)
self._eigenvectors_right(self.msmrevpi_sparse)
self._eigenvectors_right(self.msm_sparse)
    def _eigenvectors_RDL(self, msm):
        """Consistency of the right (R), diagonal-eigenvalue (D) and left (L)
        eigenvector decomposition: R L = Id and R D L reconstructs the
        transition matrix (dense case); for sparse models only the leading
        k=4 components are checked.
        """
        if not msm.is_sparse:
            R = msm.eigenvectors_right()
            D = np.diag(msm.eigenvalues())
            L = msm.eigenvectors_left()
            # orthogonality constraint
            assert (np.allclose(np.dot(R, L), np.eye(msm.nstates)))
            # REVERSIBLE: also true for LR because reversible matrix
            if msm.is_reversible:
                assert (np.allclose(np.dot(L, R), np.eye(msm.nstates)))
            # recover transition matrix
            assert (np.allclose(np.dot(R, np.dot(D, L)), msm.transition_matrix))
        else:
            k = 4
            R = msm.eigenvectors_right(k)
            D = np.diag(msm.eigenvalues(k))
            L = msm.eigenvectors_left(k)
            """Orthogonality"""
            assert (np.allclose(np.dot(L, R), np.eye(k)))
            """Reversibility"""
            if msm.is_reversible:
                mu = msm.stationary_distribution
                # for a reversible chain the left eigenvectors are the
                # pi-weighted right eigenvectors: L_i = mu * R_i
                L_mu = mu[:,np.newaxis] * R
                assert (np.allclose(np.dot(L_mu.T, R), np.eye(k)))
def test_eigenvectors_RDL(self):
self._eigenvectors_RDL(self.msmrev)
self._eigenvectors_RDL(self.msmrevpi)
self._eigenvectors_RDL(self.msm)
self._eigenvectors_RDL(self.msmrev_sparse)
self._eigenvectors_RDL(self.msmrevpi_sparse)
self._eigenvectors_RDL(self.msm_sparse)
def _timescales(self, msm):
if not msm.is_sparse:
if not msm.is_reversible:
with warnings.catch_warnings(record=True) as w:
ts = msm.timescales()
else:
ts = msm.timescales()
else:
k = 4
if not msm.is_reversible:
with warnings.catch_warnings(record=True) as w:
ts = msm.timescales(k)
else:
ts = msm.timescales(k)
# should be all positive
assert (np.all(ts > 0))
# REVERSIBLE: should be all real
if msm.is_reversible:
ts_ref = np.array([310.87, 8.5, 5.09])
assert (np.all(np.isreal(ts)))
# HERE:
np.testing.assert_almost_equal(ts[:3], ts_ref, decimal=2)
else:
ts_ref = np.array([310.49376926, 8.48302712, 5.02649564])
# HERE:
np.testing.assert_almost_equal(ts[:3], ts_ref, decimal=2)
def test_timescales(self):
self._timescales(self.msmrev)
self._timescales(self.msm)
self._timescales(self.msmrev_sparse)
self._timescales(self.msm_sparse)
# ---------------------------------
# FIRST PASSAGE PROBLEMS
# ---------------------------------
def _committor(self, msm):
a = 16
b = 48
q_forward = msm.committor_forward(a, b)
assert (q_forward[a] == 0)
assert (q_forward[b] == 1)
assert (np.all(q_forward[:30] < 0.5))
assert (np.all(q_forward[40:] > 0.5))
q_backward = msm.committor_backward(a, b)
assert (q_backward[a] == 1)
assert (q_backward[b] == 0)
assert (np.all(q_backward[:30] > 0.5))
assert (np.all(q_backward[40:] < 0.5))
# REVERSIBLE:
if msm.is_reversible:
assert (np.allclose(q_forward + q_backward, np.ones(msm.nstates)))
def test_committor(self):
self._committor(self.msmrev)
self._committor(self.msm)
self._committor(self.msmrev_sparse)
self._committor(self.msm_sparse)
def _mfpt(self, msm):
a = 16
b = 48
t = msm.mfpt(a, b)
assert (t > 0)
# HERE:
if msm.is_reversible:
np.testing.assert_allclose(t, 872.69, rtol=1e-3, atol=1e-6)
else:
np.testing.assert_allclose(t, 872.07, rtol=1e-3, atol=1e-6)
def test_mfpt(self):
self._mfpt(self.msmrev)
self._mfpt(self.msm)
self._mfpt(self.msmrev_sparse)
self._mfpt(self.msm_sparse)
# ---------------------------------
# PCCA
# ---------------------------------
def _pcca_assignment(self, msm):
if msm.is_reversible:
msm.pcca(2)
ass = msm.metastable_assignments
# test: number of states
assert (len(ass) == msm.nstates)
assert msm.n_metastable == 2
# test: should be 0 or 1
assert (np.all(ass >= 0))
assert (np.all(ass <= 1))
# should be equal (zero variance) within metastable sets
assert (np.std(ass[:30]) == 0)
assert (np.std(ass[40:]) == 0)
else:
with self.assertRaises(ValueError):
msm.pcca(2)
def test_pcca_assignment(self):
self._pcca_assignment(self.msmrev)
self._pcca_assignment(self.msm)
with warnings.catch_warnings(record=True) as w:
self._pcca_assignment(self.msmrev_sparse)
with warnings.catch_warnings(record=True) as w:
self._pcca_assignment(self.msm_sparse)
def _pcca_distributions(self, msm):
if msm.is_reversible:
msm.pcca(2)
pccadist = msm.metastable_distributions
# should be right size
assert (np.all(pccadist.shape == (2, msm.nstates)))
# should be nonnegative
assert (np.all(pccadist >= 0))
# should roughly add up to stationary:
cgdist = np.array([msm.stationary_distribution[msm.metastable_sets[0]].sum(), msm.stationary_distribution[msm.metastable_sets[1]].sum()])
ds = cgdist[0]*pccadist[0] + cgdist[1]*pccadist[1]
ds /= ds.sum()
assert (np.max(np.abs(ds - msm.stationary_distribution)) < 0.001)
else:
with self.assertRaises(ValueError):
msm.pcca(2)
def test_pcca_distributions(self):
self._pcca_distributions(self.msmrev)
self._pcca_distributions(self.msm)
self._pcca_distributions(self.msmrev_sparse)
self._pcca_distributions(self.msm_sparse)
def _pcca_memberships(self, msm):
if msm.is_reversible:
msm.pcca(2)
M = msm.metastable_memberships
# should be right size
assert (np.all(M.shape == (msm.nstates, 2)))
# should be nonnegative
assert (np.all(M >= 0))
# should add up to one:
assert (np.allclose(np.sum(M, axis=1), np.ones(msm.nstates)))
else:
with self.assertRaises(ValueError):
msm.pcca(2)
def test_pcca_memberships(self):
self._pcca_memberships(self.msmrev)
self._pcca_memberships(self.msm)
self._pcca_memberships(self.msmrev_sparse)
self._pcca_memberships(self.msm_sparse)
def _pcca_sets(self, msm):
if msm.is_reversible:
msm.pcca(2)
S = msm.metastable_sets
assignment = msm.metastable_assignments
# should coincide with assignment
for i, s in enumerate(S):
for j in range(len(s)):
assert (assignment[s[j]] == i)
else:
with self.assertRaises(ValueError):
msm.pcca(2)
def test_pcca_sets(self):
self._pcca_sets(self.msmrev)
self._pcca_sets(self.msm)
self._pcca_sets(self.msmrev_sparse)
self._pcca_sets(self.msm_sparse)
# ---------------------------------
# EXPERIMENTAL STUFF
# ---------------------------------
def _expectation(self, msm):
e = msm.expectation(list(range(msm.nstates)))
# approximately equal for both
self.assertLess(np.abs(e - 31.73), 0.01)
def test_expectation(self):
self._expectation(self.msmrev)
self._expectation(self.msm)
self._expectation(self.msmrev_sparse)
self._expectation(self.msm_sparse)
def _correlation(self, msm):
if msm.is_sparse:
k = 4
else:
k = msm.nstates
# raise assertion error because size is wrong:
maxtime = 100000
a = [1, 2, 3]
with self.assertRaises(AssertionError):
msm.correlation(a, 1)
# should decrease
a = list(range(msm.nstates))
times, corr1 = msm.correlation(a, maxtime=maxtime)
assert (len(corr1) == maxtime / msm.lagtime)
assert (len(times) == maxtime / msm.lagtime)
assert (corr1[0] > corr1[-1])
a = list(range(msm.nstates))
times, corr2 = msm.correlation(a, a, maxtime=maxtime, k=k)
# should be identical to autocorr
assert (np.allclose(corr1, corr2))
# Test: should be increasing in time
b = list(range(msm.nstates))[::-1]
times, corr3 = msm.correlation(a, b, maxtime=maxtime, )
assert (len(times) == maxtime / msm.lagtime)
assert (len(corr3) == maxtime / msm.lagtime)
assert (corr3[0] < corr3[-1])
def test_correlation(self):
self._correlation(self.msmrev)
# self._correlation(self.msm)
# self._correlation(self.msmrev_sparse)
# self._correlation(self.msm_sparse)
def _relaxation(self, msm):
if msm.is_sparse:
k = 4
else:
k = msm.nstates
pi_perturbed = (msm.stationary_distribution ** 2)
pi_perturbed /= pi_perturbed.sum()
a = list(range(msm.nstates))
maxtime = 100000
times, rel1 = msm.relaxation(msm.stationary_distribution, a, maxtime=maxtime, k=k)
# should be constant because we are in equilibrium
assert (np.allclose(rel1 - rel1[0], np.zeros((np.shape(rel1)[0]))))
times, rel2 = msm.relaxation(pi_perturbed, a, maxtime=maxtime, k=k)
# should relax
assert (len(times) == maxtime / msm.lagtime)
assert (len(rel2) == maxtime / msm.lagtime)
self.assertLess(rel2[0], rel2[-1], msm)
def test_relaxation(self):
self._relaxation(self.msmrev)
self._relaxation(self.msm)
self._relaxation(self.msmrev_sparse)
self._relaxation(self.msm_sparse)
def _fingerprint_correlation(self, msm):
if msm.is_sparse:
k = 4
else:
k = msm.nstates
if msm.is_reversible:
# raise assertion error because size is wrong:
a = [1, 2, 3]
with self.assertRaises(AssertionError):
msm.fingerprint_correlation(a, 1, k=k)
# should decrease
a = list(range(self.msm.nstates))
fp1 = msm.fingerprint_correlation(a, k=k)
# first timescale is infinite
assert (fp1[0][0] == np.inf)
# next timescales are identical to timescales:
assert (np.allclose(fp1[0][1:], msm.timescales(k-1)))
# all amplitudes nonnegative (for autocorrelation)
assert (np.all(fp1[1][:] >= 0))
# identical call
b = list(range(msm.nstates))
fp2 = msm.fingerprint_correlation(a, b, k=k)
assert (np.allclose(fp1[0], fp2[0]))
assert (np.allclose(fp1[1], fp2[1]))
# should be - of the above, apart from the first
b = list(range(msm.nstates))[::-1]
fp3 = msm.fingerprint_correlation(a, b, k=k)
assert (np.allclose(fp1[0], fp3[0]))
assert (np.allclose(fp1[1][1:], -fp3[1][1:]))
else: # raise ValueError, because fingerprints are not defined for nonreversible
with self.assertRaises(ValueError):
a = list(range(self.msm.nstates))
msm.fingerprint_correlation(a, k=k)
with self.assertRaises(ValueError):
a = list(range(self.msm.nstates))
b = list(range(msm.nstates))
msm.fingerprint_correlation(a, b, k=k)
def test_fingerprint_correlation(self):
self._fingerprint_correlation(self.msmrev)
self._fingerprint_correlation(self.msm)
self._fingerprint_correlation(self.msmrev_sparse)
self._fingerprint_correlation(self.msm_sparse)
def _fingerprint_relaxation(self, msm):
if msm.is_sparse:
k = 4
else:
k = msm.nstates
if msm.is_reversible:
# raise assertion error because size is wrong:
a = [1, 2, 3]
with self.assertRaises(AssertionError):
msm.fingerprint_relaxation(msm.stationary_distribution, a, k=k)
# equilibrium relaxation should be constant
a = list(range(msm.nstates))
fp1 = msm.fingerprint_relaxation(msm.stationary_distribution, a, k=k)
# first timescale is infinite
assert (fp1[0][0] == np.inf)
# next timescales are identical to timescales:
assert (np.allclose(fp1[0][1:], msm.timescales(k-1)))
# dynamical amplitudes should be near 0 because we are in equilibrium
assert (np.max(np.abs(fp1[1][1:])) < 1e-10)
# off-equilibrium relaxation
pi_perturbed = (msm.stationary_distribution ** 2)
pi_perturbed /= pi_perturbed.sum()
fp2 = msm.fingerprint_relaxation(pi_perturbed, a, k=k)
# first timescale is infinite
assert (fp2[0][0] == np.inf)
# next timescales are identical to timescales:
assert (np.allclose(fp2[0][1:], msm.timescales(k-1)))
# dynamical amplitudes should be significant because we are not in equilibrium
assert (np.max(np.abs(fp2[1][1:])) > 0.1)
else: # raise ValueError, because fingerprints are not defined for nonreversible
with self.assertRaises(ValueError):
a = list(range(self.msm.nstates))
msm.fingerprint_relaxation(msm.stationary_distribution, a, k=k)
with self.assertRaises(ValueError):
pi_perturbed = (msm.stationary_distribution ** 2)
pi_perturbed /= pi_perturbed.sum()
a = list(range(self.msm.nstates))
msm.fingerprint_relaxation(pi_perturbed, a)
def test_fingerprint_relaxation(self):
self._fingerprint_relaxation(self.msmrev)
self._fingerprint_relaxation(self.msm)
self._fingerprint_relaxation(self.msmrev_sparse)
self._fingerprint_relaxation(self.msm_sparse)
# ---------------------------------
# STATISTICS, SAMPLING
# ---------------------------------
def _active_state_indexes(self, msm):
I = msm.active_state_indexes
assert (len(I) == msm.nstates)
# compare to histogram
import pyemma.util.discrete_trajectories as dt
hist = dt.count_states(msm.discrete_trajectories_full)
# number of frames should match on active subset
A = msm.active_set
for i in range(A.shape[0]):
assert (I[i].shape[0] == hist[A[i]])
assert (I[i].shape[1] == 2)
def test_active_state_indexes(self):
self._active_state_indexes(self.msmrev)
self._active_state_indexes(self.msmrevpi)
self._active_state_indexes(self.msm)
self._active_state_indexes(self.msmrev_sparse)
self._active_state_indexes(self.msmrevpi_sparse)
self._active_state_indexes(self.msm_sparse)
def _generate_traj(self, msm):
T = 10
gt = msm.generate_traj(T)
# Test: should have the right dimension
assert (np.all(gt.shape == (T, 2)))
# itraj should be right
assert (np.all(gt[:, 0] == 0))
def test_generate_traj(self):
self._generate_traj(self.msmrev)
self._generate_traj(self.msmrevpi)
self._generate_traj(self.msm)
with warnings.catch_warnings(record=True) as w:
self._generate_traj(self.msmrev_sparse)
with warnings.catch_warnings(record=True) as w:
self._generate_traj(self.msmrevpi_sparse)
with warnings.catch_warnings(record=True) as w:
self._generate_traj(self.msm_sparse)
def _sample_by_state(self, msm):
nsample = 100
ss = msm.sample_by_state(nsample)
# must have the right size
assert (len(ss) == msm.nstates)
# must be correctly assigned
dtraj_active = msm.discrete_trajectories_active[0]
for i, samples in enumerate(ss):
# right shape
assert (np.all(samples.shape == (nsample, 2)))
for row in samples:
assert (row[0] == 0) # right trajectory
self.assertEqual(dtraj_active[row[1]], i)
def test_sample_by_state(self):
self._sample_by_state(self.msmrev)
self._sample_by_state(self.msmrevpi)
self._sample_by_state(self.msm)
self._sample_by_state(self.msmrev_sparse)
self._sample_by_state(self.msmrevpi_sparse)
self._sample_by_state(self.msm_sparse)
def _trajectory_weights(self, msm):
W = msm.trajectory_weights()
# should sum to 1
assert (np.abs(np.sum(W[0]) - 1.0) < 1e-6)
def test_trajectory_weights(self):
self._trajectory_weights(self.msmrev)
self._trajectory_weights(self.msmrevpi)
self._trajectory_weights(self.msm)
self._trajectory_weights(self.msmrev_sparse)
self._trajectory_weights(self.msmrevpi_sparse)
self._trajectory_weights(self.msm_sparse)
def test_simulate_MSM(self):
msm = self.msm
N=400
start=1
traj = msm.simulate(N=N, start=start)
assert (len(traj) <= N)
assert (len(np.unique(traj)) <= len(msm.transition_matrix))
assert (start == traj[0])
# ----------------------------------
# MORE COMPLEX TESTS / SANITY CHECKS
# ----------------------------------
    def _two_state_kinetics(self, msm, eps=0.001):
        """Two-state rate sanity check: k12 + k21 must approximately equal
        1/t2, the slowest relaxation rate.

        The two metastable cores are located as the extrema of the second
        left eigenvector; forward/backward rates are derived from the MFPTs
        between those cores.
        """
        if msm.is_sparse:
            k = 4
        else:
            k = msm.nstates
        # sanity check: k_forward + k_backward = 1.0/t2 for the two-state process
        l2 = msm.eigenvectors_left(k)[1, :]
        core1 = np.argmin(l2)
        core2 = np.argmax(l2)
        # transition time from left to right and vice versa
        t12 = msm.mfpt(core1, core2)
        t21 = msm.mfpt(core2, core1)
        # relaxation time
        t2 = msm.timescales(k)[0]
        # the following should hold roughly = k12 + k21 = k2.
        # sum of forward/backward rates can be a bit smaller because we are using small cores and
        # therefore underestimate rates
        ksum = 1.0 / t12 + 1.0 / t21
        k2 = 1.0 / t2
        self.assertLess(np.abs(k2 - ksum), eps)
def test_two_state_kinetics(self):
self._two_state_kinetics(self.msmrev)
self._two_state_kinetics(self.msmrevpi)
self._two_state_kinetics(self.msm)
self._two_state_kinetics(self.msmrev_sparse)
self._two_state_kinetics(self.msmrevpi_sparse)
self._two_state_kinetics(self.msm_sparse)
class TestMSMMinCountConnectivity(unittest.TestCase):
    """Tests for the ``mincount_connectivity`` option: states whose counts
    fall below the threshold must be dropped from the active set."""

    @classmethod
    def setUpClass(cls):
        # state 2 occurs exactly 5 times in this trajectory, so a threshold of
        # 6 removes it from the active set while '1/n' keeps all four states.
        dtraj = np.array(
            [0, 3, 0, 1, 2, 3, 0, 0, 1, 0, 1, 0, 3, 1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 3, 0, 0, 3, 3, 0, 0, 1, 1, 3, 0,
             1, 0, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 2, 1, 0, 3, 1, 0, 1, 0, 1, 0, 3, 0, 0, 3, 0, 0, 0, 2, 0, 0, 3,
             0, 1, 0, 0, 0, 0, 3, 3, 3, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 3, 3, 3, 1, 0, 0, 0, 2, 1, 3, 0, 0])
        assert (dtraj == 2).sum() == 5  # state 2 has only 5 counts,
        cls.dtraj = dtraj
        cls.mincount_connectivity = 6  # state 2 will be kicked out by this choice.
        cls.active_set_unrestricted = np.array([0, 1, 2, 3])
        cls.active_set_restricted = np.array([0, 1, 3])

    def _test_connectivity(self, msm, msm_mincount):
        # unrestricted model keeps all states; restricted model drops state 2
        np.testing.assert_equal(msm.active_set, self.active_set_unrestricted)
        np.testing.assert_equal(msm_mincount.active_set, self.active_set_restricted)

    def test_msm(self):
        # maximum-likelihood MSM estimator
        msm_one_over_n = estimate_markov_model(self.dtraj, lag=1, mincount_connectivity='1/n')
        msm_restrict_connectivity = estimate_markov_model(self.dtraj, lag=1,
                                                          mincount_connectivity=self.mincount_connectivity)
        self._test_connectivity(msm_one_over_n, msm_restrict_connectivity)

    def test_bmsm(self):
        # Bayesian MSM estimator
        from pyemma.msm import bayesian_markov_model
        msm = bayesian_markov_model(self.dtraj, lag=1, mincount_connectivity='1/n')
        msm_restricted = bayesian_markov_model(self.dtraj, lag=1, mincount_connectivity=self.mincount_connectivity)
        self._test_connectivity(msm, msm_restricted)

    @unittest.skip("""
  File "/home/marscher/workspace/pyemma/pyemma/msm/estimators/_OOM_MSM.py", line 260, in oom_components
    omega = np.real(R[:, 0])
IndexError: index 0 is out of bounds for axis 1 with size 0
""")
    def test_oom(self):
        # OOM-weighted estimation (skipped: upstream estimator crashes, see above)
        from pyemma import msm
        msm_one_over_n = msm.estimate_markov_model(self.dtraj, lag=1, mincount_connectivity='1/n', weights='oom')
        # we now restrict the connectivity to have at least 6 counts, so we will loose state 2
        msm_restrict_connectivity = msm.estimate_markov_model(self.dtraj, lag=1, mincount_connectivity=6, weights='oom')
        self._test_connectivity(msm_one_over_n, msm_restrict_connectivity)

    def test_timescales(self):
        # mincount_connectivity must be forwarded to the ITS estimator
        from pyemma.msm import timescales_msm
        its = timescales_msm(self.dtraj, lags=[1, 2], mincount_connectivity=0, errors=None)
        assert its.estimator.mincount_connectivity == 0
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
kapdop/android_kernel_lge_mako | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
chidelmun/NodeApps | node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the Unicode symbol for *codePoint* as a Python 2 unicode string.

    BMP code points map to a single character; supplementary-plane code
    points (U+10000..U+10FFFF) are encoded as a UTF-16 surrogate pair using
    the standard surrogate formulae; anything else yields the string 'Error'.

    NOTE(review): on narrow Python 2 builds unichr() is limited to 0xFFFF,
    so the supplementary branch would raise ValueError there — confirm this
    script runs on a wide build.
    """
    if codePoint >= 0x0000 and codePoint <= 0xFFFF:
        return unichr(codePoint)
    elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
        highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
        lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
        return unichr(highSurrogate) + unichr(lowSurrogate)
    else:
        return 'Error'
def hexify(codePoint):
    """Format a non-negative code point as a 6-digit uppercase 'U+XXXXXX' label."""
    return 'U+{0:06X}'.format(codePoint)
def writeFile(filename, contents):
    """Write *contents* to *filename*, stripped of surrounding whitespace and
    terminated with a single newline; echoes the filename for progress."""
    print filename
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| gpl-2.0 |
tinchoss/Python_Android | python/src/Lib/test/sample_doctest.py | 228 | 1037 | """This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
    """
    >>> 2+2
    5
    >>> 2+2
    4
    """
    # NOTE: the first doctest example fails on purpose — this module's
    # pass/fail counts are asserted by another test, so do not "fix" it.
def bar():
"""
>>> 2+2
4
"""
def test_silly_setup():
"""
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
True
"""
def w_blank():
"""
>>> if 1:
... print 'a'
... print
... print 'b'
a
<BLANKLINE>
b
"""
x = 1
def x_is_one():
"""
>>> x
1
"""
def y_is_one():
"""
>>> y
1
"""
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
    # Build a unittest suite from this module's doctests; the module's
    # docstring states the pass/fail counts are checked by another test.
    import doctest
    return doctest.DocTestSuite()
| apache-2.0 |
phantomii/restalchemy | restalchemy/tests/functional/restapi/ra_based/microservice/middlewares.py | 1 | 1078 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.api import middlewares
from restalchemy.tests.functional.restapi.ra_based.microservice import (
contexts)
class ContextMiddleware(middlewares.ContextMiddleware):
    """Attach a fresh Context to each request and release it afterwards."""

    def process_request(self, req):
        # One context per request; downstream handlers reach it via
        # req.context.
        request_context = contexts.Context()
        req.context = request_context
        # Delegate to the wrapped application, then free any resources the
        # context acquired while the request was being served.
        response = req.get_response(self.application)
        request_context.release()
        return response
| apache-2.0 |
taichatha/youtube-dl | youtube_dl/extractor/ringtv.py | 124 | 1970 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RingTVIE(InfoExtractor):
    # Matches both news articles and direct video pages on the site; the
    # 'type' group decides how the real video id is resolved below.
    _VALID_URL = r'http://(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
    _TEST = {
        "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
        "md5": "d25945f5df41cdca2d2587165ac28720",
        "info_dict": {
            'id': '857645',
            'ext': 'mp4',
            "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
            "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
        }
    }

    def _real_extract(self, url):
        """Resolve the numeric video id, then build direct media URLs."""
        mobj = re.match(self._VALID_URL, url)
        # URL slugs look like "310833-luis-collazo-..."; the leading number
        # is the display id.
        video_id = mobj.group('id').split('-')[0]
        webpage = self._download_webpage(url, video_id)
        if mobj.group('type') == 'news':
            # News pages use a different id than the URL slug: pull the real
            # video id out of the springboardplatform embed iframe.
            video_id = self._search_regex(
                r'''(?x)<iframe[^>]+src="http://cms\.springboardplatform\.com/
                        embed_iframe/[0-9]+/video/([0-9]+)/''',
                webpage, 'real video ID')
        title = self._og_search_title(webpage)
        description = self._html_search_regex(
            r'addthis:description="([^"]+)"',
            webpage, 'description', fatal=False)
        # Media and thumbnail files follow a predictable storage layout
        # keyed by the numeric video id.
        final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
        thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id
        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': thumbnail_url,
            'description': description,
        }
| unlicense |
leighmacdonald/msort | setup.py | 1 | 1669 | #!/bin/env python3
"""
Author: Leigh MacDonald <leigh.macdonald@gmail.com>
"""
from unittest import TextTestRunner, TestLoader
from glob import glob
from os.path import splitext, basename, join
from os import getcwd
try:
from os.path import walk
except ImportError:
from os import walk
from distutils.core import setup, Command
class TestCommand(Command):
    """distutils command that discovers and runs the test suite in tests/."""

    user_options = []

    def initialize_options(self):
        # Remember where the command was invoked from; test discovery is
        # performed relative to this directory.
        self._dir = getcwd()

    def finalize_options(self):
        # No options to validate.
        pass

    def run(self):
        """Find all the test modules in tests/ and run them."""
        module_names = [
            '.'.join(['tests', splitext(basename(path))[0]])
            for path in glob(join(self._dir, 'tests', '*.py'))
            if not path.endswith('__init__.py')
        ]
        suite = TestLoader().loadTestsFromNames(module_names)
        TextTestRunner(verbosity=1).run(suite)
# Package registration.  The custom ``test`` command above is exposed as
# ``python setup.py test``.
setup(
    name='msort',
    version='2.0',
    description='Utility for sorting scene releases into grouped subfolders',
    author='Leigh MacDonald',
    author_email='leigh@cudd.li',
    url='https://msort.cudd.li/',
    packages=['msort', 'msort.check'],
    scripts=['mediasort.py'],
    cmdclass={'test': TestCommand},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: MacOS X',
        # BUG FIX: the original list had no comma after this entry, so
        # Python's implicit string concatenation silently fused it with the
        # following classifier into one invalid trove string.
        'Programming Language :: Python :: 3',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Topic :: Utilities',
    ]
)
yesbox/ansible | lib/ansible/plugins/test/files.py | 148 | 1347 | # (c) 2015, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import isdir, isfile, isabs, exists, lexists, islink, samefile, ismount
from ansible import errors
class TestModule(object):
    ''' Ansible file jinja2 tests '''

    def tests(self):
        # Map jinja2 test names onto the corresponding os.path predicates.
        mapping = {}

        # file testing
        mapping['is_dir'] = isdir
        mapping['is_file'] = isfile
        mapping['is_link'] = islink
        mapping['exists'] = exists
        mapping['link_exists'] = lexists

        # path testing
        mapping['is_abs'] = isabs
        mapping['is_same_file'] = samefile
        mapping['is_mount'] = ismount

        return mapping
| gpl-3.0 |
elkingtonmcb/shogun | examples/undocumented/python_modular/transfer_multitask_clustered_logistic_regression.py | 17 | 1514 | #!/usr/bin/env python
from numpy import array,hstack,sin,cos
from numpy.random import seed, rand
from tools.load import LoadMatrix
# Shared fixture data: real-valued feature matrices and two-class labels
# loaded from the example data directory.
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')

# One argument set per example invocation (train data, test data, labels).
parameter_list = [[traindat,testdat,label_traindat]]
def transfer_multitask_clustered_logistic_regression (fm_train=traindat,fm_test=testdat,label_train=label_traindat):
    """Train a clustered multitask logistic regression on three synthetic
    tasks (raw, sin- and cos-transformed copies of the training data) and
    return the regression outputs for the first task.
    """
    from modshogun import BinaryLabels, RealFeatures, Task, TaskGroup, MultitaskClusteredLogisticRegression, MSG_DEBUG

    # BUG FIX: the original body read the module-level global ``traindat``
    # here instead of the ``fm_train`` parameter, so callers passing their
    # own data were silently ignored.
    features = RealFeatures(hstack((fm_train,sin(fm_train),cos(fm_train))))
    labels = BinaryLabels(hstack((label_train,label_train,label_train)))

    n_vectors = features.get_num_vectors()
    # Split the vectors into three equal, contiguous tasks matching the
    # three stacked copies of the data above.
    task_one = Task(0,n_vectors//3)
    task_two = Task(n_vectors//3,2*n_vectors//3)
    task_three = Task(2*n_vectors//3,n_vectors)
    task_group = TaskGroup()
    task_group.append_task(task_one)
    task_group.append_task(task_two)
    task_group.append_task(task_three)

    # rho1=1.0, rho2=100.0, 2 clusters of tasks.
    mtlr = MultitaskClusteredLogisticRegression(1.0,100.0,features,labels,task_group,2)
    #mtlr.io.set_loglevel(MSG_DEBUG)
    mtlr.set_tolerance(1e-3) # use 1e-2 tolerance
    mtlr.set_max_iter(100)
    mtlr.train()
    mtlr.set_current_task(0)
    #print mtlr.get_w()
    out = mtlr.apply_regression().get_labels()
    return out
if __name__=='__main__':
    # Run the example with the default (file-loaded) dataset.
    print('TransferMultitaskClusteredLogisticRegression')
    transfer_multitask_clustered_logistic_regression(*parameter_list[0])
| gpl-3.0 |
Metalab/web2serial | software/web2serial-core/tornado/test/ioloop_test.py | 2 | 18034 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import socket
import sys
import threading
import time
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, StackContext, wrap, NullContext
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipIfNonUnix, skipOnTravis
try:
from concurrent import futures
except ImportError:
futures = None
class TestIOLoop(AsyncTestCase):
    """Core IOLoop behavior: callback scheduling, timeouts and fd handlers."""

    @skipOnTravis
    def test_add_callback_wakeup(self):
        # Make sure that add_callback from inside a running IOLoop
        # wakes up the IOLoop immediately instead of waiting for a timeout.
        def callback():
            self.called = True
            self.stop()

        def schedule_callback():
            self.called = False
            self.io_loop.add_callback(callback)
            # Store away the time so we can check if we woke up immediately
            self.start_time = time.time()
        self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
        self.wait()
        self.assertAlmostEqual(time.time(), self.start_time, places=2)
        self.assertTrue(self.called)

    @skipOnTravis
    def test_add_callback_wakeup_other_thread(self):
        def target():
            # sleep a bit to let the ioloop go into its poll loop
            time.sleep(0.01)
            self.stop_time = time.time()
            self.io_loop.add_callback(self.stop)
        thread = threading.Thread(target=target)
        self.io_loop.add_callback(thread.start)
        self.wait()
        delta = time.time() - self.stop_time
        self.assertLess(delta, 0.1)
        thread.join()

    def test_add_timeout_timedelta(self):
        # Deadlines may be given as a datetime.timedelta relative to now,
        # not only as an absolute time.
        self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
        self.wait()

    def test_multiple_add(self):
        sock, port = bind_unused_port()
        try:
            self.io_loop.add_handler(sock.fileno(), lambda fd, events: None,
                                     IOLoop.READ)
            # Attempting to add the same handler twice fails
            # (with a platform-dependent exception)
            self.assertRaises(Exception, self.io_loop.add_handler,
                              sock.fileno(), lambda fd, events: None,
                              IOLoop.READ)
        finally:
            self.io_loop.remove_handler(sock.fileno())
            sock.close()

    def test_remove_without_add(self):
        # remove_handler should not throw an exception if called on an fd
        # was never added.
        sock, port = bind_unused_port()
        try:
            self.io_loop.remove_handler(sock.fileno())
        finally:
            sock.close()

    def test_add_callback_from_signal(self):
        # cheat a little bit and just run this normally, since we can't
        # easily simulate the races that happen with real signal handlers
        self.io_loop.add_callback_from_signal(self.stop)
        self.wait()

    def test_add_callback_from_signal_other_thread(self):
        # Very crude test, just to make sure that we cover this case.
        # This also happens to be the first test where we run an IOLoop in
        # a non-main thread.
        other_ioloop = IOLoop()
        thread = threading.Thread(target=other_ioloop.start)
        thread.start()
        other_ioloop.add_callback_from_signal(other_ioloop.stop)
        thread.join()
        other_ioloop.close()

    def test_add_callback_while_closing(self):
        # Issue #635: add_callback() should raise a clean exception
        # if called while another thread is closing the IOLoop.
        closing = threading.Event()

        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)
        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        # Race against close(); any add_callback that lands during/after
        # close must fail with the documented RuntimeError.
        for i in range(1000):
            try:
                other_ioloop.add_callback(lambda: None)
            except RuntimeError as e:
                self.assertEqual("IOLoop is closing", str(e))
                break

    def test_handle_callback_exception(self):
        # IOLoop.handle_callback_exception can be overridden to catch
        # exceptions in callbacks.
        def handle_callback_exception(callback):
            self.assertIs(sys.exc_info()[0], ZeroDivisionError)
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            # remove the test StackContext that would see this uncaught
            # exception as a test failure.
            self.io_loop.add_callback(lambda: 1 / 0)
        self.wait()

    @skipIfNonUnix  # just because socketpair is so convenient
    def test_read_while_writeable(self):
        # Ensure that write events don't come in while we're waiting for
        # a read and haven't asked for writeability. (the reverse is
        # difficult to test for)
        client, server = socket.socketpair()
        try:
            def handler(fd, events):
                self.assertEqual(events, IOLoop.READ)
                self.stop()
            self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
            self.io_loop.add_timeout(self.io_loop.time() + 0.01,
                                     functools.partial(server.send, b'asdf'))
            self.wait()
            self.io_loop.remove_handler(client.fileno())
        finally:
            client.close()
            server.close()

    def test_remove_timeout_after_fire(self):
        # It is not an error to call remove_timeout after it has run.
        handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
        self.wait()
        self.io_loop.remove_timeout(handle)

    def test_remove_timeout_cleanup(self):
        # Add and remove enough callbacks to trigger cleanup.
        # Not a very thorough test, but it ensures that the cleanup code
        # gets executed and doesn't blow up. This test is only really useful
        # on PollIOLoop subclasses, but it should run silently on any
        # implementation.
        for i in range(2000):
            timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600,
                                               lambda: None)
            self.io_loop.remove_timeout(timeout)
        # HACK: wait two IOLoop iterations for the GC to happen.
        self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
        self.wait()

    def test_timeout_with_arguments(self):
        # This tests that all the timeout methods pass through *args correctly.
        results = []
        self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
        self.io_loop.add_timeout(datetime.timedelta(seconds=0),
                                 results.append, 2)
        self.io_loop.call_at(self.io_loop.time(), results.append, 3)
        self.io_loop.call_later(0, results.append, 4)
        self.io_loop.call_later(0, self.stop)
        self.wait()
        self.assertEqual(results, [1, 2, 3, 4])

    def test_add_timeout_return(self):
        # All the timeout methods return non-None handles that can be
        # passed to remove_timeout.
        handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
        self.assertFalse(handle is None)
        self.io_loop.remove_timeout(handle)

    def test_call_at_return(self):
        handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
        self.assertFalse(handle is None)
        self.io_loop.remove_timeout(handle)

    def test_call_later_return(self):
        handle = self.io_loop.call_later(0, lambda: None)
        self.assertFalse(handle is None)
        self.io_loop.remove_timeout(handle)

    def test_close_file_object(self):
        """When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True)),
        not just the fd.
        """
        # Use a socket since they are supported by IOLoop on all platforms.
        # Unfortunately, sockets don't support the .closed attribute for
        # inspecting their close status, so we must use a wrapper.
        class SocketWrapper(object):
            def __init__(self, sockobj):
                self.sockobj = sockobj
                self.closed = False

            def fileno(self):
                return self.sockobj.fileno()

            def close(self):
                self.closed = True
                self.sockobj.close()
        sockobj, port = bind_unused_port()
        socket_wrapper = SocketWrapper(sockobj)
        io_loop = IOLoop()
        io_loop.add_handler(socket_wrapper, lambda fd, events: None,
                            IOLoop.READ)
        io_loop.close(all_fds=True)
        self.assertTrue(socket_wrapper.closed)

    def test_handler_callback_file_object(self):
        """The handler callback receives the same fd object it passed in."""
        server_sock, port = bind_unused_port()
        fds = []

        def handle_connection(fd, events):
            fds.append(fd)
            conn, addr = server_sock.accept()
            conn.close()
            self.stop()
        # First registration: by file object ...
        self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(('127.0.0.1', port))
            self.wait()
        self.io_loop.remove_handler(server_sock)
        # ... second registration: by numeric fd.
        self.io_loop.add_handler(server_sock.fileno(), handle_connection,
                                 IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(('127.0.0.1', port))
            self.wait()
        self.assertIs(fds[0], server_sock)
        self.assertEqual(fds[1], server_sock.fileno())
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()

    def test_mixed_fd_fileobj(self):
        server_sock, port = bind_unused_port()

        def f(fd, events):
            pass
        self.io_loop.add_handler(server_sock, f, IOLoop.READ)
        with self.assertRaises(Exception):
            # The exact error is unspecified - some implementations use
            # IOError, others use ValueError.
            self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()

    def test_reentrant(self):
        """Calling start() twice should raise an error, not deadlock."""
        returned_from_start = [False]
        got_exception = [False]

        def callback():
            try:
                self.io_loop.start()
                returned_from_start[0] = True
            except Exception:
                got_exception[0] = True
            self.stop()
        self.io_loop.add_callback(callback)
        self.wait()
        self.assertTrue(got_exception[0])
        self.assertFalse(returned_from_start[0])

    def test_exception_logging(self):
        """Uncaught exceptions get logged by the IOLoop."""
        # Use a NullContext to keep the exception from being caught by
        # AsyncTestCase.
        with NullContext():
            self.io_loop.add_callback(lambda: 1/0)
            self.io_loop.add_callback(self.stop)
            with ExpectLog(app_log, "Exception in callback"):
                self.wait()

    def test_exception_logging_future(self):
        """The IOLoop examines exceptions from Futures and logs them."""
        with NullContext():
            @gen.coroutine
            def callback():
                self.io_loop.add_callback(self.stop)
                1/0
            self.io_loop.add_callback(callback)
            with ExpectLog(app_log, "Exception in callback"):
                self.wait()

    def test_spawn_callback(self):
        # An added callback runs in the test's stack_context, so will be
        # re-raised in wait().
        self.io_loop.add_callback(lambda: 1/0)
        with self.assertRaises(ZeroDivisionError):
            self.wait()
        # A spawned callback is run directly on the IOLoop, so it will be
        # logged without stopping the test.
        self.io_loop.spawn_callback(lambda: 1/0)
        self.io_loop.add_callback(self.stop)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
    """Checks IOLoop.current() against an explicitly constructed loop."""

    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_current(self):
        def record_current():
            # Capture whichever loop is "current" while running, then stop.
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()

        self.io_loop.add_callback(record_current)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
class TestIOLoopAddCallback(AsyncTestCase):
    """Verifies that callbacks run in the StackContext that wrapped them."""

    def setUp(self):
        super(TestIOLoopAddCallback, self).setUp()
        self.active_contexts = []

    def add_callback(self, callback, *args, **kwargs):
        # Indirection point: subclasses override this to exercise other
        # callback-scheduling entry points (see the FromSignal subclass).
        self.io_loop.add_callback(callback, *args, **kwargs)

    @contextlib.contextmanager
    def context(self, name):
        # Records context entry/exit so tests can assert which contexts
        # are active when a callback actually runs.
        self.active_contexts.append(name)
        yield
        self.assertEqual(self.active_contexts.pop(), name)

    def test_pre_wrap(self):
        # A pre-wrapped callback is run in the context in which it was
        # wrapped, not when it was added to the IOLoop.
        def f1():
            self.assertIn('c1', self.active_contexts)
            self.assertNotIn('c2', self.active_contexts)
            self.stop()

        with StackContext(functools.partial(self.context, 'c1')):
            wrapped = wrap(f1)

        with StackContext(functools.partial(self.context, 'c2')):
            self.add_callback(wrapped)

        self.wait()

    def test_pre_wrap_with_args(self):
        # Same as test_pre_wrap, but the function takes arguments.
        # Implementation note: The function must not be wrapped in a
        # functools.partial until after it has been passed through
        # stack_context.wrap
        def f1(foo, bar):
            self.assertIn('c1', self.active_contexts)
            self.assertNotIn('c2', self.active_contexts)
            self.stop((foo, bar))

        with StackContext(functools.partial(self.context, 'c1')):
            wrapped = wrap(f1)

        with StackContext(functools.partial(self.context, 'c2')):
            self.add_callback(wrapped, 1, bar=2)

        result = self.wait()
        self.assertEqual(result, (1, 2))
class TestIOLoopAddCallbackFromSignal(TestIOLoopAddCallback):
    # Repeat the add_callback tests using add_callback_from_signal
    def add_callback(self, callback, *args, **kwargs):
        # Only the scheduling entry point changes; the inherited tests
        # re-run unmodified against add_callback_from_signal.
        self.io_loop.add_callback_from_signal(callback, *args, **kwargs)
@unittest.skipIf(futures is None, "futures module not present")
class TestIOLoopFutures(AsyncTestCase):
    """Integration of IOLoop.add_future with concurrent.futures executors."""

    def test_add_future_threads(self):
        with futures.ThreadPoolExecutor(1) as pool:
            self.io_loop.add_future(pool.submit(lambda: None),
                                    lambda future: self.stop(future))
            future = self.wait()
            self.assertTrue(future.done())
            self.assertTrue(future.result() is None)

    def test_add_future_stack_context(self):
        ready = threading.Event()

        def task():
            # we must wait for the ioloop callback to be scheduled before
            # the task completes to ensure that add_future adds the callback
            # asynchronously (which is the scenario in which capturing
            # the stack_context matters)
            ready.wait(1)
            assert ready.isSet(), "timed out"
            raise Exception("worker")

        def callback(future):
            self.future = future
            raise Exception("callback")

        def handle_exception(typ, value, traceback):
            self.exception = value
            self.stop()
            return True

        # stack_context propagates to the ioloop callback, but the worker
        # task just has its exceptions caught and saved in the Future.
        with futures.ThreadPoolExecutor(1) as pool:
            with ExceptionStackContext(handle_exception):
                self.io_loop.add_future(pool.submit(task), callback)
            ready.set()
        self.wait()

        self.assertEqual(self.exception.args[0], "callback")
        self.assertEqual(self.future.exception().args[0], "worker")
class TestIOLoopRunSync(unittest.TestCase):
    """Exercises IOLoop.run_sync with plain callables and coroutines."""

    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        # A plain callable's return value comes back from run_sync.
        self.assertEqual(self.io_loop.run_sync(lambda: 42), 42)

    def test_sync_exception(self):
        # A plain callable's exception propagates out of run_sync.
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def produce():
            yield gen.Task(self.io_loop.add_callback)
            raise gen.Return(42)

        self.assertEqual(self.io_loop.run_sync(produce), 42)

    def test_async_exception(self):
        @gen.coroutine
        def explode():
            yield gen.Task(self.io_loop.add_callback)
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(explode)

    def test_current(self):
        def check_current():
            self.assertIs(IOLoop.current(), self.io_loop)

        self.io_loop.run_sync(check_current)

    def test_timeout(self):
        @gen.coroutine
        def sleep_forever():
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)

        self.assertRaises(TimeoutError,
                          self.io_loop.run_sync, sleep_forever, timeout=0.01)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| lgpl-3.0 |
kidaa/kythe | third_party/grpc/src/python/src/grpc/framework/base/_constants.py | 44 | 1647 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Private constants for the package."""

# Emitted alongside tracebacks when the framework itself misbehaves, making
# internal failures easy to grep for in logs.
INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Base) internal error! :-('
| apache-2.0 |
dsaraujo/circulante | django/contrib/gis/utils/wkt.py | 419 | 1846 | """
Utilities for manipulating Geometry WKT.
"""
def precision_wkt(geom, prec):
    """
    Returns WKT text of the geometry according to the given precision (an
    integer or a string). If the precision is an integer, then the decimal
    places of coordinates WKT will be truncated to that number:

    >>> pnt = Point(5, 23)
    >>> pnt.wkt
    'POINT (5.0000000000000000 23.0000000000000000)'
    >>> precision_wkt(pnt, 1)
    'POINT(5.0 23.0)'

    If the precision is a string, it must be valid Python format string
    (e.g., '%20.7f') -- thus, you should know what you're doing.
    """
    # DOC FIX: the example above used to call a nonexistent ``precision()``
    # and showed a space after the type tag that this function never emits.
    if isinstance(prec, int):
        num_fmt = '%%.%df' % prec
    elif isinstance(prec, basestring):
        num_fmt = prec
    else:
        # Tell the caller what was wrong instead of raising a bare TypeError.
        raise TypeError('WKT precision must be an integer or format string, got: %r' % (prec,))

    # TODO: Support 3D geometries.
    coord_fmt = ' '.join([num_fmt, num_fmt])

    def formatted_coords(coords):
        # Format each coordinate pair; c[:2] drops any Z component.
        return ','.join([coord_fmt % c[:2] for c in coords])

    def formatted_poly(poly):
        # A polygon body is a comma-separated list of parenthesized rings.
        return ','.join(['(%s)' % formatted_coords(r) for r in poly])

    def formatted_geom(g):
        # Yields the WKT pieces for a single geometry, recursing for
        # geometry collections.
        gtype = str(g.geom_type).upper()
        yield '%s(' % gtype
        if gtype == 'POINT':
            yield formatted_coords((g.coords,))
        elif gtype in ('LINESTRING', 'LINEARRING'):
            yield formatted_coords(g.coords)
        elif gtype in ('POLYGON', 'MULTILINESTRING'):
            yield formatted_poly(g)
        elif gtype == 'MULTIPOINT':
            yield formatted_coords(g.coords)
        elif gtype == 'MULTIPOLYGON':
            yield ','.join(['(%s)' % formatted_poly(p) for p in g])
        elif gtype == 'GEOMETRYCOLLECTION':
            yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])
        else:
            raise TypeError('Unsupported geometry type: %s' % gtype)
        yield ')'

    return ''.join([wkt for wkt in formatted_geom(geom)])
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.