repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/contrib/markup/tests.py | 102 | 4306 | # Quick tests for the markup templatetags (django.contrib.markup)
import re
import warnings
from django.template import Template, Context
from django import test
from django.utils import unittest
from django.utils.html import escape
try:
import textile
except ImportError:
textile = None
try:
import markdown
markdown_version = getattr(markdown, "version_info", 0)
except ImportError:
markdown = None
try:
import docutils
except ImportError:
docutils = None
class Templates(test.TestCase):
    """
    Quick tests for the markup templatetags (django.contrib.markup).

    Each filter is exercised twice: with its backing library installed the
    markup must be rendered to HTML; without it the filter must return the
    input unchanged (escaped, in the textile case).
    """

    textile_content = """Paragraph 1
Paragraph 2 with "quotes" and @code@"""

    markdown_content = """Paragraph 1
## An h2"""

    rest_content = """Paragraph 1
Paragraph 2 with a link_
.. _link: http://www.example.com/"""

    def setUp(self):
        # django.contrib.markup is deprecated; silence its warning so test
        # output stays clean. State is restored in tearDown().
        self.save_warnings_state()
        warnings.filterwarnings('ignore', category=DeprecationWarning, module='django.contrib.markup')

    def tearDown(self):
        self.restore_warnings_state()

    @unittest.skipUnless(textile, 'textile not installed')
    def test_textile(self):
        t = Template("{% load markup %}{{ textile_content|textile }}")
        rendered = t.render(Context({'textile_content':self.textile_content})).strip()
        # Some textile versions indent output with tabs; drop them before
        # comparing.
        self.assertEqual(rendered.replace('\t', ''), """<p>Paragraph 1</p>
<p>Paragraph 2 with “quotes” and <code>code</code></p>""")

    @unittest.skipIf(textile, 'textile is installed')
    def test_no_textile(self):
        t = Template("{% load markup %}{{ textile_content|textile }}")
        rendered = t.render(Context({'textile_content':self.textile_content})).strip()
        self.assertEqual(rendered, escape(self.textile_content))

    @unittest.skipUnless(markdown and markdown_version >= (2,1), 'markdown >= 2.1 not installed')
    def test_markdown(self):
        t = Template("{% load markup %}{{ markdown_content|markdown }}")
        rendered = t.render(Context({'markdown_content':self.markdown_content})).strip()
        # Fix: raw string -- "\s" inside a non-raw literal is an invalid
        # escape sequence (DeprecationWarning, later SyntaxWarning).
        pattern = re.compile(r"""<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>""")
        self.assertTrue(pattern.match(rendered))

    @unittest.skipUnless(markdown and markdown_version >= (2,1), 'markdown >= 2.1 not installed')
    def test_markdown_attribute_disable(self):
        t = Template("{% load markup %}{{ markdown_content|markdown:'safe' }}")
        markdown_content = "{@onclick=alert('hi')}some paragraph"
        rendered = t.render(Context({'markdown_content':markdown_content})).strip()
        # With the 'safe' flag the attribute syntax must survive unprocessed.
        self.assertTrue('@' in rendered)

    @unittest.skipUnless(markdown and markdown_version >= (2,1), 'markdown >= 2.1 not installed')
    def test_markdown_attribute_enable(self):
        t = Template("{% load markup %}{{ markdown_content|markdown }}")
        markdown_content = "{@onclick=alert('hi')}some paragraph"
        rendered = t.render(Context({'markdown_content':markdown_content})).strip()
        self.assertFalse('@' in rendered)

    @unittest.skipIf(markdown, 'markdown is installed')
    def test_no_markdown(self):
        t = Template("{% load markup %}{{ markdown_content|markdown }}")
        rendered = t.render(Context({'markdown_content':self.markdown_content})).strip()
        self.assertEqual(rendered, self.markdown_content)

    @unittest.skipUnless(docutils, 'docutils not installed')
    def test_docutils(self):
        t = Template("{% load markup %}{{ rest_content|restructuredtext }}")
        rendered = t.render(Context({'rest_content':self.rest_content})).strip()
        # Different versions of docutils return slightly different HTML
        try:
            # Docutils v0.4 and earlier
            self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with a <a class="reference" href="http://www.example.com/">link</a></p>""")
        except AssertionError:
            # Docutils from SVN (which will become 0.5)
            self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with a <a class="reference external" href="http://www.example.com/">link</a></p>""")

    @unittest.skipIf(docutils, 'docutils is installed')
    def test_no_docutils(self):
        t = Template("{% load markup %}{{ rest_content|restructuredtext }}")
        rendered = t.render(Context({'rest_content':self.rest_content})).strip()
        self.assertEqual(rendered, self.rest_content)
| gpl-2.0 |
error10/bitcoin | qa/rpc-tests/reindex.py | 144 | 1063 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -reindex with CheckBlockIndex
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os.path
class ReindexTest(BitcoinTestFramework):
    """Check that restarting with -reindex rebuilds the same block index."""

    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self):
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir))

    def run_test(self):
        # Mine a few blocks, shut the node down cleanly, then restart with
        # -reindex and -checkblockindex, and verify the rebuilt index reaches
        # the same height.
        self.nodes[0].generate(3)
        stop_node(self.nodes[0], 0)
        wait_bitcoinds()
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
        assert_equal(self.nodes[0].getblockcount(), 3)
        # Fix: use the parenthesized form for consistency with setup_chain()
        # above; it is valid under both Python 2 and Python 3.
        print("Success")

if __name__ == '__main__':
    ReindexTest().main()
| mit |
tavendo/AutobahnPython | autobahn/wamp/uri.py | 3 | 13301 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import re
from autobahn.util import public
from autobahn.wamp.types import RegisterOptions, SubscribeOptions
__all__ = (
'Pattern',
'register',
'subscribe',
'error',
'convert_starred_uri'
)
def convert_starred_uri(uri):
    """
    Convert a starred URI to a standard WAMP URI plus a detected matching
    policy. A starred URI may contain the character ``*`` to mark wildcard
    URI components or URI prefixes; this notation is comfortable at the
    user/API level but must be converted for use on the wire.

    Detection rules:

    * no star at all -> ``'exact'`` match, URI unchanged
    * exactly one star, at the very end -> ``'prefix'`` match on the URI
      with the trailing star removed
    * anything else -> ``'wildcard'`` match on the URI with all stars
      removed

    Note that ``'com.example.*'`` is always detected as prefix-matching on
    ``'com.example.'`` -- wildcard-matching on that URI cannot be expressed
    in starred notation (prefix matching covers a strict superset of it),
    which is one reason starred URIs are not used at the protocol level.

    :param uri: The starred URI.
    :type uri: str

    :returns: Pair ``(uri, match)`` with ``match`` one of ``'exact'``,
        ``'prefix'`` or ``'wildcard'``.
    """
    assert(type(uri) == str)

    stars = uri.count('*')
    if stars == 0:
        return uri, 'exact'
    if stars == 1 and uri.endswith('*'):
        return uri[:-1], 'prefix'
    return uri.replace('*', ''), 'wildcard'
@public
class Pattern(object):
    """
    A WAMP URI Pattern.

    .. todo::

        * suffix matches
        * args + kwargs
        * uuid converter
        * multiple URI patterns per decorated object
        * classes: Pattern, EndpointPattern, ..
    """

    # What the pattern is attached to: a procedure endpoint, an event
    # handler, or an exception class.
    URI_TARGET_ENDPOINT = 1
    URI_TARGET_HANDLER = 2
    URI_TARGET_EXCEPTION = 3

    # How the URI matches: exact, prefix, or wildcard.
    URI_TYPE_EXACT = 1
    URI_TYPE_PREFIX = 2
    URI_TYPE_WILDCARD = 3

    _URI_COMPONENT = re.compile(r"^[a-z0-9][a-z0-9_\-]*$")
    """
    Compiled regular expression for a WAMP URI component.
    """

    _URI_NAMED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*)>$")
    """
    Compiled regular expression for a named WAMP URI component.

    .. note::
        This pattern is stricter than a general WAMP URI component since a valid Python identifier is required.
    """

    _URI_NAMED_CONVERTED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*):([a-z]*)>$")
    """
    Compiled regular expression for a named and type-converted WAMP URI component.

    .. note::
        This pattern is stricter than a general WAMP URI component since a valid Python identifier is required.
    """

    def __init__(self, uri, target, options=None, check_types=False):
        """
        :param uri: The URI or URI pattern, e.g. ``"com.myapp.product.<product:int>.update"``.
        :type uri: str

        :param target: The target for this pattern: a procedure endpoint (a callable),
           an event handler (a callable) or an exception (a class).
        :type target: callable or obj

        :param options: An optional options object
        :type options: None or RegisterOptions or SubscribeOptions

        :param check_types: Enable automatic type checking against (Python 3.5+) type hints
            specified on the ``endpoint`` callable. Types are checked at run-time on each
            invocation of the ``endpoint`` callable. When a type mismatch occurs, the error
            is forwarded to the callee code in ``onUserError`` override method of
            :class:`autobahn.wamp.protocol.ApplicationSession`. An error
            of type :class:`autobahn.wamp.exception.TypeCheckError` is also raised and
            returned to the caller (via the router).
        :type check_types: bool
        """
        assert(type(uri) == str)
        assert(len(uri) > 0)
        assert(target in [Pattern.URI_TARGET_ENDPOINT,
                          Pattern.URI_TARGET_HANDLER,
                          Pattern.URI_TARGET_EXCEPTION])
        # Options are only meaningful for endpoints (RegisterOptions) and
        # handlers (SubscribeOptions); exceptions never carry options.
        if target == Pattern.URI_TARGET_ENDPOINT:
            assert(options is None or type(options) == RegisterOptions)
        elif target == Pattern.URI_TARGET_HANDLER:
            assert(options is None or type(options) == SubscribeOptions)
        else:
            options = None

        components = uri.split('.')

        # pl: regex fragments (one per URI component) joined later into the
        # full match pattern; nc: maps capture-group name -> converter type.
        pl = []
        nc = {}
        group_count = 0
        for i in range(len(components)):
            component = components[i]

            # Case 1: named + type-converted component, e.g. "<product:int>"
            match = Pattern._URI_NAMED_CONVERTED_COMPONENT.match(component)
            if match:
                ctype = match.groups()[1]
                if ctype not in ['string', 'int', 'suffix']:
                    raise Exception("invalid URI")

                # 'suffix' is only allowed as the last component
                if ctype == 'suffix' and i != len(components) - 1:
                    raise Exception("invalid URI")

                name = match.groups()[0]
                if name in nc:
                    raise Exception("invalid URI")

                if ctype in ['string', 'suffix']:
                    nc[name] = str
                elif ctype == 'int':
                    nc[name] = int
                else:
                    # should not arrive here
                    raise Exception("logic error")

                pl.append("(?P<{0}>[a-z0-9_]+)".format(name))
                group_count += 1
                continue

            # Case 2: named component without converter, e.g. "<product>"
            # -- captured as a string.
            match = Pattern._URI_NAMED_COMPONENT.match(component)
            if match:
                name = match.groups()[0]
                if name in nc:
                    raise Exception("invalid URI")

                nc[name] = str
                pl.append("(?P<{0}>[a-z0-9_]+)".format(name))
                group_count += 1
                continue

            # Case 3: plain literal component -- matched verbatim.
            match = Pattern._URI_COMPONENT.match(component)
            if match:
                pl.append(component)
                continue

            # Case 4: empty component ("a..b") -- anonymous wildcard,
            # keyed by its integer group index in nc.
            if component == '':
                group_count += 1
                pl.append(r"([a-z0-9][a-z0-9_\-]*)")
                nc[group_count] = str
                continue

            raise Exception("invalid URI")

        if nc:
            # URI pattern
            self._type = Pattern.URI_TYPE_WILDCARD
            p = "^" + r"\.".join(pl) + "$"
            self._pattern = re.compile(p)
            self._names = nc
        else:
            # exact URI
            self._type = Pattern.URI_TYPE_EXACT
            self._pattern = None
            self._names = None
        self._uri = uri
        self._target = target
        self._options = options
        self._check_types = check_types

    @public
    @property
    def options(self):
        """
        Returns the Options instance (if present) for this pattern.

        :return: None or the Options instance
        :rtype: None or RegisterOptions or SubscribeOptions
        """
        return self._options

    @public
    @property
    def uri_type(self):
        """
        Returns the URI type of this pattern

        :return:
        :rtype: Pattern.URI_TYPE_EXACT, Pattern.URI_TYPE_PREFIX or Pattern.URI_TYPE_WILDCARD
        """
        return self._type

    @public
    def uri(self):
        """
        Returns the original URI (pattern) for this pattern.

        :returns: The URI (pattern), e.g. ``"com.myapp.product.<product:int>.update"``.
        :rtype: str
        """
        return self._uri

    def match(self, uri):
        """
        Match the given (fully qualified) URI according to this pattern
        and return extracted args and kwargs.

        :param uri: The URI to match, e.g. ``"com.myapp.product.123456.update"``.
        :type uri: str

        :returns: A tuple ``(args, kwargs)``
        :rtype: tuple

        :raises Exception: when a wildcard pattern does not match the URI.
        """
        # NOTE(review): anonymous (empty) URI components are keyed by their
        # integer group index in self._names, so kwargs may contain int keys
        # -- confirm callers handle this. Also, URI_TYPE_PREFIX falls through
        # and implicitly returns None; the constructor never produces that
        # type, so the branch is currently unreachable.
        args = []
        kwargs = {}
        if self._type == Pattern.URI_TYPE_EXACT:
            return args, kwargs
        elif self._type == Pattern.URI_TYPE_WILDCARD:
            match = self._pattern.match(uri)
            if match:
                for key in self._names:
                    val = match.group(key)
                    # Apply the converter recorded at construction (str/int).
                    val = self._names[key](val)
                    kwargs[key] = val
                return args, kwargs
            else:
                raise Exception("no match")

    @public
    def is_endpoint(self):
        """
        Check if this pattern is for a procedure endpoint.

        :returns: ``True``, iff this pattern is for a procedure endpoint.
        :rtype: bool
        """
        return self._target == Pattern.URI_TARGET_ENDPOINT

    @public
    def is_handler(self):
        """
        Check if this pattern is for an event handler.

        :returns: ``True``, iff this pattern is for an event handler.
        :rtype: bool
        """
        return self._target == Pattern.URI_TARGET_HANDLER

    @public
    def is_exception(self):
        """
        Check if this pattern is for an exception.

        :returns: ``True``, iff this pattern is for an exception.
        :rtype: bool
        """
        return self._target == Pattern.URI_TARGET_EXCEPTION
@public
def register(uri, options=None, check_types=False):
    """
    Decorator for WAMP procedure endpoints.

    Attaches a :class:`Pattern` for *uri* to the decorated callable (on its
    ``_wampuris`` list). When *uri* is ``None``, the callable's own name is
    used as the URI.

    :param uri:
    :type uri: str

    :param options:
    :type options: None or RegisterOptions

    :param check_types: Enable automatic type checking against (Python 3.5+) type hints
        specified on the ``endpoint`` callable. Types are checked at run-time on each
        invocation of the ``endpoint`` callable. When a type mismatch occurs, the error
        is forwarded to the callee code in ``onUserError`` override method of
        :class:`autobahn.wamp.protocol.ApplicationSession`. An error
        of type :class:`autobahn.wamp.exception.TypeCheckError` is also raised and
        returned to the caller (via the router).
    :type check_types: bool
    """
    def decorate(f):
        assert(callable(f))
        if uri is None:
            endpoint_uri = '{}'.format(f.__name__)
        else:
            endpoint_uri = uri
        pattern = Pattern(endpoint_uri, Pattern.URI_TARGET_ENDPOINT, options, check_types)
        if not hasattr(f, '_wampuris'):
            f._wampuris = []
        f._wampuris.append(pattern)
        return f
    return decorate
@public
def subscribe(uri, options=None, check_types=False):
    """
    Decorator for WAMP event handlers.

    Attaches a :class:`Pattern` for *uri* to the decorated callable (on its
    ``_wampuris`` list).

    :param uri:
    :type uri: str

    :param options:
    :type options: None or SubscribeOptions

    :param check_types: Enable automatic type checking against (Python 3.5+) type hints
        specified on the ``endpoint`` callable. Types are checked at run-time on each
        invocation of the ``endpoint`` callable. When a type mismatch occurs, the error
        is forwarded to the callee code in ``onUserError`` override method of
        :class:`autobahn.wamp.protocol.ApplicationSession`. An error
        of type :class:`autobahn.wamp.exception.TypeCheckError` is also raised and
        returned to the caller (via the router).
    :type check_types: bool
    """
    def decorate(f):
        assert(callable(f))
        pattern = Pattern(uri, Pattern.URI_TARGET_HANDLER, options, check_types)
        if not hasattr(f, '_wampuris'):
            f._wampuris = []
        f._wampuris.append(pattern)
        return f
    return decorate
@public
def error(uri):
    """
    Decorator for WAMP error classes.

    Attaches an exception :class:`Pattern` for *uri* to the decorated
    exception class (on its ``_wampuris`` list).
    """
    def decorate(cls):
        assert(issubclass(cls, Exception))
        pattern = Pattern(uri, Pattern.URI_TARGET_EXCEPTION)
        if not hasattr(cls, '_wampuris'):
            cls._wampuris = []
        cls._wampuris.append(pattern)
        return cls
    return decorate
| mit |
Arable/evepod | lib/python2.7/site-packages/gunicorn/management/commands/run_gunicorn.py | 38 | 3258 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from gunicorn.app.djangoapp import DjangoApplicationCommand
from gunicorn.config import make_settings
# monkey patch django.
# This patch make sure that we use real threads to get the ident which
# is going to happen if we are using gevent or eventlet.
try:
    from django.db.backends import BaseDatabaseWrapper, DatabaseError

    # Only patch Django versions that actually enforce per-thread sharing.
    if "validate_thread_sharing" in BaseDatabaseWrapper.__dict__:
        import thread
        _get_ident = thread.get_ident

        # Keep a reference to the original __init__ so the wrapper below can
        # delegate to it.
        __old__init__ = BaseDatabaseWrapper.__init__

        def _init(self, *args, **kwargs):
            # Record the REAL (OS-level) thread id the wrapper was created
            # on, instead of the possibly greenlet-faked one Django stores.
            __old__init__(self, *args, **kwargs)
            self._thread_ident = _get_ident()

        def _validate_thread_sharing(self):
            # Compare against the real thread id captured in _init above.
            if (not self.allow_thread_sharing
                and self._thread_ident != _get_ident()):
                raise DatabaseError("DatabaseWrapper objects created in a "
                    "thread can only be used in that same thread. The object "
                    "with alias '%s' was created in thread id %s and this is "
                    "thread id %s."
                    % (self.alias, self._thread_ident, _get_ident()))

        # Install the patched methods.
        BaseDatabaseWrapper.__init__ = _init
        BaseDatabaseWrapper.validate_thread_sharing = _validate_thread_sharing
except ImportError:
    # Django (or this Django version's layout) not available -- nothing to
    # patch.
    pass
def make_options():
    """Build optparse options for the run_gunicorn management command.

    Exposes every CLI-capable gunicorn setting (except ones Django manages
    itself) plus the Django admin-media option.

    :returns: tuple of ``optparse.make_option`` results
    """
    # Fix: ignore must be a 1-tuple -- ("version") is just a parenthesized
    # string (the old code only worked because "version" in "version" is
    # True via substring matching).
    g_settings = make_settings(ignore=("version",))

    keys = g_settings.keys()
    # NOTE(review): the original defined a (section, order) sort key here but
    # never applied it; option order follows make_settings() iteration order.

    opts = [
        make_option('--adminmedia', dest='admin_media_path', default='',
            help='Specifies the directory from which to serve admin media.')
    ]

    for k in keys:
        # These are handled by Django itself, not passed through to gunicorn.
        if k in ('pythonpath', 'django_settings',):
            continue

        setting = g_settings[k]
        if not setting.cli:
            # Setting has no command-line form.
            continue

        args = tuple(setting.cli)

        kwargs = {
            "dest": setting.name,
            "metavar": setting.meta or None,
            "action": setting.action or "store",
            "type": setting.type or "string",
            "default": None,
            "help": "%s [%s]" % (setting.short, setting.default)
        }
        # optparse rejects a "type" for non-store actions (e.g. store_true).
        if kwargs["action"] != "store":
            kwargs.pop("type")

        opts.append(make_option(*args, **kwargs))

    return tuple(opts)

GUNICORN_OPTIONS = make_options()
class Command(BaseCommand):
    # All gunicorn settings are exposed as command-line options in addition
    # to Django's standard management-command options.
    option_list = BaseCommand.option_list + GUNICORN_OPTIONS
    help = "Starts a fully-functional Web server using gunicorn."
    args = '[optional port number, or ipaddr:port or unix:/path/to/sockfile]'

    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False

    def handle(self, addrport=None, *args, **options):
        """Entry point for ``manage.py run_gunicorn [addrport]``."""
        # Only a single positional argument (the bind address) is accepted.
        if args:
            raise CommandError('Usage is run_gunicorn %s' % self.args)
        if addrport:
            options['bind'] = addrport
        # 'admin_media_path' is our own option; pop it so only genuine
        # gunicorn settings remain in ``options``.
        admin_media_path = options.pop('admin_media_path', '')
        DjangoApplicationCommand(options, admin_media_path).run()
| apache-2.0 |
tareqalayan/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_facts.py | 8 | 6796 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}

# NOTE(review): the YAML inside DOCUMENTATION/EXAMPLES below appears to have
# lost its leading indentation in this copy of the file -- confirm against
# upstream before relying on it (ansible-doc may not parse it as-is). The
# string contents are left byte-for-byte as found.
DOCUMENTATION = '''
---
module: azure_rm_virtualmachineimage_facts
version_added: "2.1"
short_description: Get virtual machine image facts.
description:
- Get facts for virtual machine images.
options:
location:
description:
- Azure location value (ie. westus, eastus, eastus2, northcentralus, etc.). Supplying only a
location value will yield a list of available publishers for the location.
required: true
publisher:
description:
- Name of an image publisher. List image offerings associated with a particular publisher.
offer:
description:
- Name of an image offering. Combine with sku to see a list of available image versions.
sku:
description:
- Image offering SKU. Combine with offer to see a list of available versions.
version:
description:
- Specific version number of an image.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''

EXAMPLES = '''
- name: Get facts for a specific image
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
version: '7.1.20160308'
- name: List available versions
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
- name: List available offers
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
- name: List available publishers
azure_rm_virtualmachineimage_facts:
location: eastus
'''

RETURN = '''
azure_vmimages:
description: List of image dicts.
returned: always
type: list
example: []
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineImageFacts(AzureRMModuleBase):
    # Facts module: narrows its image query depending on how many of
    # location/publisher/offer/sku/version were supplied (see exec_module).

    def __init__(self, **kwargs):

        self.module_arg_spec = dict(
            location=dict(type='str', required=True),
            publisher=dict(type='str'),
            offer=dict(type='str'),
            sku=dict(type='str'),
            version=dict(type='str')
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_vmimages=[])
        )

        # Populated from validated module parameters in exec_module().
        self.location = None
        self.publisher = None
        self.offer = None
        self.sku = None
        self.version = None

        super(AzureRMVirtualMachineImageFacts, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to the most specific query the supplied parameters allow."""
        # Copy validated module parameters onto the instance.
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # Most specific first: full 5-tuple -> single image; down to
        # location only -> list of publishers.
        if self.location and self.publisher and self.offer and self.sku and self.version:
            self.results['ansible_facts']['azure_vmimages'] = self.get_item()
        elif self.location and self.publisher and self.offer and self.sku:
            self.results['ansible_facts']['azure_vmimages'] = self.list_images()
        elif self.location and self.publisher:
            self.results['ansible_facts']['azure_vmimages'] = self.list_offers()
        elif self.location:
            self.results['ansible_facts']['azure_vmimages'] = self.list_publishers()

        return self.results

    def get_item(self):
        """Fetch one fully-qualified image; returns [] when not found."""
        item = None
        result = []

        try:
            item = self.compute_client.virtual_machine_images.get(self.location,
                                                                  self.publisher,
                                                                  self.offer,
                                                                  self.sku,
                                                                  self.version)
        except CloudError:
            # Not found -- return an empty result rather than failing.
            pass

        if item:
            result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)]

        return result

    def list_images(self):
        """List image versions for location/publisher/offer/sku."""
        response = None
        results = []
        try:
            response = self.compute_client.virtual_machine_images.list(self.location,
                                                                       self.publisher,
                                                                       self.offer,
                                                                       self.sku,)
        except CloudError:
            pass
        except Exception as exc:
            self.fail("Failed to list images: {0}".format(str(exc)))

        if response:
            for item in response:
                results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
                                                  enum_modules=AZURE_ENUM_MODULES))
        return results

    def list_offers(self):
        """List image offers for location/publisher."""
        response = None
        results = []
        try:
            response = self.compute_client.virtual_machine_images.list_offers(self.location,
                                                                              self.publisher)
        except CloudError:
            pass
        except Exception as exc:
            self.fail("Failed to list offers: {0}".format(str(exc)))

        if response:
            for item in response:
                results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
                                                  enum_modules=AZURE_ENUM_MODULES))
        return results

    def list_publishers(self):
        """List image publishers for the location."""
        response = None
        results = []
        try:
            response = self.compute_client.virtual_machine_images.list_publishers(self.location)
        except CloudError:
            pass
        except Exception as exc:
            self.fail("Failed to list publishers: {0}".format(str(exc)))

        if response:
            for item in response:
                results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
                                                  enum_modules=AZURE_ENUM_MODULES))
        return results
def main():
    # Instantiating the module runs it -- AzureRMModuleBase drives execution
    # from its constructor.
    AzureRMVirtualMachineImageFacts()

if __name__ == '__main__':
    main()
| gpl-3.0 |
jimgong92/allezViens | venv/lib/python2.7/site-packages/pip/wheel.py | 83 | 22262 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import functools
import hashlib
import logging
import os
import re
import shutil
import stat
import sys
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme
from pip import pep425tags
from pip.utils import (call_subprocess, normalize_path, make_path_relative,
captured_stdout, remove_tracebacks)
from pip.utils.logging import indent_log
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.six.moves import configparser
# File extension identifying wheel archives.
wheel_ext = '.whl'

# Highest wheel metadata version this module claims compatibility with
# (presumably checked against the Wheel-Version header elsewhere in this
# module -- the check is not in view here).
VERSION_COMPATIBLE = (1, 0)

logger = logging.getLogger(__name__)
def rehash(path, algo='sha256', blocksize=1 << 20):
    """Return (hash, length) for path using hashlib.new(algo).

    The hash is returned in RECORD-file format, i.e.
    ``"<algo>=<urlsafe-b64-digest-without-padding>"``.

    :param path: file to hash (read in streaming fashion)
    :param algo: any algorithm name accepted by ``hashlib.new``
    :param blocksize: read chunk size in bytes
    """
    h = hashlib.new(algo)
    length = 0
    with open(path, 'rb') as f:
        block = f.read(blocksize)
        while block:
            length += len(block)
            h.update(block)
            block = f.read(blocksize)
    # Bug fix: prefix the digest with the algorithm actually used instead of
    # hard-coding 'sha256' (the old code mislabeled e.g. md5 digests).
    digest = algo + '=' + urlsafe_b64encode(
        h.digest()
    ).decode('latin1').rstrip('=')
    return (digest, length)
def open_for_csv(name, mode):
    """Open *name* for use with the csv module, with version-correct newline
    handling: binary mode on Python 2, ``newline=''`` on Python 3."""
    if sys.version_info[0] >= 3:
        return open(name, mode, newline='')
    return open(name, mode + 'b')
def fix_script(path):
    """Replace a ``#!python`` shebang in *path* with the running interpreter.

    Returns True when the file was rewritten, False when it had no such
    shebang, and None when *path* is not a regular file."""
    # XXX RECORD hashes will need to be updated
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as script:
        shebang = script.readline()
        if not shebang.startswith(b'#!python'):
            return False
        body = script.read()
    interpreter = sys.executable.encode(sys.getfilesystemencoding())
    with open(path, 'wb') as script:
        script.write(b'#!' + interpreter + os.linesep.encode("ascii"))
        script.write(body)
    return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
                          \.dist-info$""", re.VERBOSE)

def root_is_purelib(name, wheeldir):
    """
    Return True if the extracted wheel in wheeldir should go into purelib.

    Looks up the project's ``*.dist-info/WHEEL`` metadata and checks its
    ``Root-Is-Purelib`` flag.
    """
    name_folded = name.replace("-", "_")
    for entry in os.listdir(wheeldir):
        m = dist_info_re.match(entry)
        if not (m and m.group('name') == name_folded):
            continue
        wheel_metadata = os.path.join(wheeldir, entry, 'WHEEL')
        with open(wheel_metadata) as wheel:
            for line in wheel:
                if line.lower().rstrip() == "root-is-purelib: true":
                    return True
    return False
def get_entrypoints(filename):
    """Parse a wheel's entry_points.txt.

    Returns a pair ``(console, gui)`` of dicts mapping script names to entry
    point specs; both are empty when the file does not exist."""
    if not os.path.exists(filename):
        return {}, {}

    # entry_points wrappers may receive strings that are not strictly valid
    # INI files; strip leading/trailing whitespace per line into an in-memory
    # buffer so configparser accepts them.
    data = StringIO()
    with open(filename) as fp:
        for line in fp:
            data.write(line.strip())
            data.write("\n")
    data.seek(0)

    cp = configparser.RawConfigParser()
    cp.readfp(data)

    console = {}
    gui = {}
    if cp.has_section('console_scripts'):
        console = dict(cp.items('console_scripts'))
    if cp.has_section('gui_scripts'):
        gui = dict(cp.items('gui_scripts'))
    return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
compileall.compile_dir(source, force=True, quiet=True)
logger.info(remove_tracebacks(stdout.getvalue()))
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
if not os.path.exists(dest): # common for the 'include' path
os.makedirs(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base
and s.endswith('.dist-info')
# is self.req.project_name case preserving?
and s.lower().startswith(
req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
if not os.path.exists(destdir):
os.makedirs(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
    def _get_script_text(entry):
        # Render the replacement console-script source for one entry point.
        # `entry.prefix` is the dotted module path and `entry.suffix` the
        # attribute reference ("func" or "obj.func"); "import_name" is the
        # first attribute segment, which is what must be imported.
        return maker.script_template % {
            "module": entry.prefix,
            "import_name": entry.suffix.split(".")[0],
            "func": entry.suffix,
        }
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc
    Yield paths to all the files in RECORD. For each .py file in RECORD, add
    the .pyc in the same directory.
    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    from pip.utils import FakeFile  # circular import
    # RECORD is CSV; each row starts with a path relative to dist.location
    # (the hash/size columns in the remaining fields are ignored here).
    r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for row in r:
        path = os.path.join(dist.location, row[0])
        yield path
        if path.endswith('.py'):
            # Also yield the legacy same-directory .pyc for each .py file;
            # duplicates across rows are filtered out by @_unique.
            dn, fn = os.path.split(path)
            base = fn[:-3]
            path = os.path.join(dn, base + '.pyc')
            yield path
def wheel_version(source_dir):
    """
    Return the Wheel-Version of an extracted wheel, if possible.
    Otherwise, return False if we couldn't parse / extract it.

    :param source_dir: directory containing the unpacked wheel metadata.
    :return: tuple of ints (major, minor, ...), or False on any failure.
    """
    try:
        dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
        wheel_data = dist.get_metadata('WHEEL')
        # WHEEL is RFC 822-style metadata; parse it for the version header.
        wheel_data = Parser().parsestr(wheel_data)
        version = wheel_data['Wheel-Version'].strip()
        return tuple(map(int, version.split('.')))
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Any metadata/parse failure
        # (missing dist, missing header, non-numeric version) simply
        # means the version is unknown.
        return False
def check_compatibility(version, name):
    """
    Raises errors or warns if called with an incompatible Wheel-Version.

    Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
    installing a version only minor version ahead (e.g 1.2 > 1.1).

    version: a 2-tuple representing a Wheel-Version (Major, Minor)
    name: name of wheel or package to raise exception about

    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
    """
    # An empty/False version means we could not even parse the metadata.
    if not version:
        raise UnsupportedWheel(
            "%s is in an unsupported or invalid wheel" % name
        )
    version_text = '.'.join(map(str, version))
    # Newer major series: hard failure.
    if version[0] > VERSION_COMPATIBLE[0]:
        raise UnsupportedWheel(
            "%s's Wheel-Version (%s) is not compatible with this version "
            "of pip" % (name, version_text)
        )
    # Newer minor version only: proceed, but warn the user.
    if version > VERSION_COMPATIBLE:
        logger.warning(
            'Installing from a newer Wheel-Version (%s)',
            version_text,
        )
class Wheel(object):
    """A wheel file"""
    # TODO: maybe move the install code into this class

    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
        ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE
    )

    def __init__(self, filename):
        """
        Parse ``filename`` into name, version and compatibility tags.

        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        match = self.wheel_file_re.match(filename)
        if not match:
            raise InvalidWheelFilename(
                "%s is not a valid wheel filename." % filename
            )
        self.filename = filename
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.name = match.group('name').replace('_', '-')
        self.version = match.group('ver').replace('_', '-')
        # Compressed tag sets: each component may hold several
        # dot-separated values (e.g. "py2.py3").
        self.pyversions = match.group('pyver').split('.')
        self.abis = match.group('abi').split('.')
        self.plats = match.group('plat').split('.')
        # All the tag combinations from this file
        self.file_tags = {
            (py, abi, plat)
            for py in self.pyversions
            for abi in self.abis
            for plat in self.plats
        }

    def support_index_min(self, tags=None):
        """
        Return the lowest index that one of the wheel's file_tag combinations
        achieves in the supported_tags list e.g. if there are 8 supported tags,
        and one of the file tags is first in the list, then return 0.  Returns
        None if the wheel is not supported.
        """
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        indexes = [tags.index(tag) for tag in self.file_tags if tag in tags]
        if not indexes:
            return None
        return min(indexes)

    def supported(self, tags=None):
        """Is this wheel supported on this system?"""
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        # True when at least one file tag is in the supported set.
        return not set(tags).isdisjoint(self.file_tags)
class WheelBuilder(object):
    """Build wheels from a RequirementSet."""

    def __init__(self, requirement_set, finder, wheel_dir, build_options=None,
                 global_options=None):
        """
        :param requirement_set: RequirementSet whose members get built.
        :param finder: PackageFinder used while preparing the requirements.
        :param wheel_dir: output directory for built wheel files.
        :param build_options: extra arguments for the ``bdist_wheel`` command.
        :param global_options: extra arguments for ``setup.py`` itself.
        """
        self.requirement_set = requirement_set
        self.finder = finder
        self.wheel_dir = normalize_path(wheel_dir)
        self.build_options = build_options or []
        self.global_options = global_options or []

    def _build_one(self, req):
        """Build one wheel; return True on success, False on failure."""
        # Shim around setup.py that normalizes CRLF so exec() accepts it.
        base_args = [
            sys.executable, '-c',
            "import setuptools;__file__=%r;"
            "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
            "__file__, 'exec'))" % req.setup_py
        ] + list(self.global_options)
        logger.info('Running setup.py bdist_wheel for %s', req.name)
        logger.info('Destination directory: %s', self.wheel_dir)
        wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] \
            + self.build_options
        try:
            call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
            return True
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit — a failed build must not
            # prevent Ctrl-C from aborting pip.
            logger.error('Failed building wheel for %s', req.name)
            return False

    def build(self):
        """Build wheels for every buildable requirement in the set.

        Requirements that are already wheels or are editable are skipped.

        :return: True when every attempted build succeeded (or nothing
            needed building), False if any build failed.
        """
        # unpack sdists and construct the requirement set
        self.requirement_set.prepare_files(self.finder)
        reqset = self.requirement_set.requirements.values()
        buildset = []
        for req in reqset:
            if req.is_wheel:
                logger.info(
                    'Skipping %s, due to already being wheel.', req.name,
                )
            elif req.editable:
                logger.info(
                    'Skipping %s, due to being editable', req.name,
                )
            else:
                buildset.append(req)
        if not buildset:
            return True
        # Build the wheels.
        logger.info(
            'Building wheels for collected packages: %s',
            ', '.join([req.name for req in buildset]),
        )
        with indent_log():
            build_success, build_failure = [], []
            for req in buildset:
                if self._build_one(req):
                    build_success.append(req)
                else:
                    build_failure.append(req)
        # notify success/failure
        if build_success:
            logger.info(
                'Successfully built %s',
                ' '.join([req.name for req in build_success]),
            )
        if build_failure:
            logger.info(
                'Failed to build %s',
                ' '.join([req.name for req in build_failure]),
            )
        # Return True if all builds were successful
        return len(build_failure) == 0
| mit |
StefanRijnhart/odoo | openerp/addons/base/tests/test_view_validation.py | 396 | 3427 | # This test can be run stand-alone with something like:
# > PYTHONPATH=. python2 openerp/tests/test_view_validation.py
from lxml import etree
from StringIO import StringIO
import unittest2
from openerp.tools.view_validation import (valid_page_in_book, valid_att_in_form, valid_type_in_colspan,
valid_type_in_col, valid_att_in_field, valid_att_in_label,
valid_field_in_graph, valid_field_in_tree
)
# Minimal view fixtures for the validators below.  The "invalid" variants
# omit required attributes (string/name/for), misplace <page> outside a
# notebook, or carry non-integer col/colspan values; the "valid" variants
# satisfy every check.
invalid_form = etree.parse(StringIO('''\
<form>
<label></label>
<group>
<div>
<page></page>
<label colspan="True"></label>
<field></field>
</div>
</group>
<notebook>
<page>
<group col="Two">
<div>
<label></label>
<field colspan="Five"> </field>
</div>
</group>
</page>
</notebook>
</form>
''')).getroot()
# Well-formed form: string/name/for attributes present, numeric col/colspan.
valid_form = etree.parse(StringIO('''\
<form string="">
<field name=""></field>
<field name=""></field>
<notebook>
<page>
<field name=""></field>
<label string=""></label>
<field name=""></field>
</page>
<page>
<group colspan="5" col="2">
<label for=""></label>
<label string="" colspan="5"></label>
</group>
</page>
</notebook>
</form>
''')).getroot()
# Graph views: fields must be direct children and carry a name attribute.
invalid_graph = etree.parse(StringIO('''\
<graph>
<label/>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</graph>
''')).getroot()
valid_graph = etree.parse(StringIO('''\
<graph string="">
<field name=""></field>
<field name=""></field>
</graph>
''')).getroot()
# Tree views: same direct-children rule as graphs (buttons are allowed).
invalid_tree = etree.parse(StringIO('''\
<tree>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</tree>
''')).getroot()
valid_tree = etree.parse(StringIO('''\
<tree string="">
<field name=""></field>
<field name=""></field>
<button/>
<field name=""></field>
</tree>
''')).getroot()
class test_view_validation(unittest2.TestCase):
    """ Test the view validation code (but not the views themselves). """
    # Each test pairs one validator with the invalid/valid fixtures defined
    # above: the validator must reject the invalid tree and accept the valid
    # one.
    def test_page_validation(self):
        # <page> is only legal inside a <notebook>.
        assert not valid_page_in_book(invalid_form)
        assert valid_page_in_book(valid_form)
    def test_all_field_validation(self):
        assert not valid_att_in_field(invalid_form)
        assert valid_att_in_field(valid_form)
    def test_all_label_validation(self):
        assert not valid_att_in_label(invalid_form)
        assert valid_att_in_label(valid_form)
    def test_form_string_validation(self):
        assert valid_att_in_form(valid_form)
    def test_graph_validation(self):
        assert not valid_field_in_graph(invalid_graph)
        assert valid_field_in_graph(valid_graph)
    def test_tree_validation(self):
        assert not valid_field_in_tree(invalid_tree)
        assert valid_field_in_tree(valid_tree)
    def test_colspan_datatype_validation(self):
        # colspan must be an integer literal ("True"/"Five" are rejected).
        assert not valid_type_in_colspan(invalid_form)
        assert valid_type_in_colspan(valid_form)
    def test_col_datatype_validation(self):
        assert not valid_type_in_col(invalid_form)
        assert valid_type_in_col(valid_form)
# Allow running this module directly as a stand-alone test script.
if __name__ == '__main__':
    unittest2.main()
| agpl-3.0 |
Zord13appdesa/python-for-android | python3-alpha/python3-src/Lib/plat-aix4/IN.py | 108 | 3622 | # Generated by h2py from /usr/include/netinet/in.h
# Included from net/nh.h
# Included from sys/machine.h
# --- sys/machine.h: byte order and PowerPC machine-state register bits ---
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
DEFAULT_GPR = 0xDEADBEEF
# MSR_* : Machine State Register flag bits.
MSR_EE = 0x8000
MSR_PR = 0x4000
MSR_FP = 0x2000
MSR_ME = 0x1000
MSR_FE = 0x0800
MSR_FE0 = 0x0800
MSR_SE = 0x0400
MSR_BE = 0x0200
MSR_IE = 0x0100
MSR_FE1 = 0x0100
MSR_AL = 0x0080
MSR_IP = 0x0040
MSR_IR = 0x0020
MSR_DR = 0x0010
MSR_PM = 0x0004
DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
# CR_* / XER_* : condition-register and fixed-point-exception-register bits.
CR_LT = 0x80000000
CR_GT = 0x40000000
CR_EQ = 0x20000000
CR_SO = 0x10000000
CR_FX = 0x08000000
CR_FEX = 0x04000000
CR_VX = 0x02000000
CR_OX = 0x01000000
XER_SO = 0x80000000
XER_OV = 0x40000000
XER_CA = 0x20000000
def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
def XER_LENGTH(xer): return (xer & 0x0000007F)
DSISR_IO = 0x80000000
DSISR_PFT = 0x40000000
DSISR_LOCK = 0x20000000
DSISR_FPIO = 0x10000000
DSISR_PROT = 0x08000000
DSISR_LOOP = 0x04000000
DSISR_DRST = 0x04000000
DSISR_ST = 0x02000000
DSISR_SEGB = 0x01000000
DSISR_DABR = 0x00400000
DSISR_EAR = 0x00100000
SRR_IS_PFT = 0x40000000
SRR_IS_ISPEC = 0x20000000
SRR_IS_IIO = 0x10000000
SRR_IS_GUARD = 0x10000000
SRR_IS_PROT = 0x08000000
SRR_IS_LOOP = 0x04000000
SRR_PR_FPEN = 0x00100000
SRR_PR_INVAL = 0x00080000
SRR_PR_PRIV = 0x00040000
SRR_PR_TRAP = 0x00020000
SRR_PR_IMPRE = 0x00010000
# NOTE(review): h2py left the C cast ``(uint)`` in place below; ``uint``
# is undefined in Python, so calling BUID_7F_SRVAL or BAT_ESEG raises
# NameError.  Kept exactly as generated.
def BUID_7F_SRVAL(raddr): return (0x87F00000 | (((uint)(raddr)) >> 28))
BT_256M = 0x1FFC
BT_128M = 0x0FFC
BT_64M = 0x07FC
BT_32M = 0x03FC
BT_16M = 0x01FC
BT_8M = 0x00FC
BT_4M = 0x007C
BT_2M = 0x003C
BT_1M = 0x001C
BT_512K = 0x000C
BT_256K = 0x0004
BT_128K = 0x0000
BT_NOACCESS = 0x0
BT_RDONLY = 0x1
BT_WRITE = 0x2
BT_VS = 0x2
BT_VP = 0x1
def BAT_ESEG(dbatu): return (((uint)(dbatu) >> 28))
MIN_BAT_SIZE = 0x00020000
MAX_BAT_SIZE = 0x10000000
# net/nh.h: host order equals network order on this big-endian platform
# (BYTE_ORDER = BIG_ENDIAN above), so these conversions are identities.
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
# --- netinet/in.h: IP protocol numbers, ports, address classes, options ---
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_LOCAL = 63
IPPROTO_EON = 80
IPPROTO_BIP = 0x53
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_TIMESERVER = 37
# Classful-address helpers; ``(int)(i)`` is the C cast run through h2py and
# evaluates as the Python call int(i).
def IN_CLASSA(i): return (((int)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((int)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((int)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((int)(i) & 0xf0000000) == 0xe0000000)
def IN_MULTICAST(i): return IN_CLASSD(i)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
def IN_EXPERIMENTAL(i): return (((int)(i) & 0xe0000000) == 0xe0000000)
def IN_BADCLASS(i): return (((int)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_BROADCAST = 0xffffffff
INADDR_LOOPBACK = 0x7f000001
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
# IP_* : socket-option names for get/setsockopt at the IP level.
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
| apache-2.0 |
appsembler/edx-platform | openedx/core/djangoapps/api_admin/tests/test_forms.py | 13 | 1508 | #pylint: disable=missing-docstring
import ddt
from django.test import TestCase
from openedx.core.djangoapps.api_admin.forms import ApiAccessRequestForm, ViewersWidget
from openedx.core.djangoapps.api_admin.tests.utils import VALID_DATA
from openedx.core.djangolib.testing.utils import skip_unless_lms
# LMS-only tests; ddt expands every @ddt.data tuple into its own test case.
@skip_unless_lms
@ddt.ddt
class ApiAccessFormTest(TestCase):
    """Validation behaviour of ApiAccessRequestForm."""

    @ddt.data(
        (VALID_DATA, True),
        ({}, False),
        (dict(VALID_DATA, terms_of_service=False), False)
    )
    @ddt.unpack
    def test_form_valid(self, data, is_valid):
        # Valid only with the full VALID_DATA payload; empty data or an
        # unaccepted terms_of_service flag must fail validation.
        form = ApiAccessRequestForm(data)
        self.assertEqual(form.is_valid(), is_valid)
@skip_unless_lms
class ViewersWidgetTest(TestCase):
    # Widget under test; shared by the test methods as a class attribute.
    widget = ViewersWidget()

    def test_render_value(self):
        """
        Verify that ViewersWidget always displays serialized value on rendering.
        """
        dummy_string_value = 'staff, verified'
        input_field_name = 'viewers'
        # Expected markup: the widget must serialize list values into the
        # same comma-separated string it renders for plain strings.
        expected_widget_html = '<input type="text" name="{input_field_name}" value="{serialized_value}" />'.format(
            input_field_name=input_field_name,
            serialized_value=dummy_string_value,
        )
        output = self.widget.render(name=input_field_name, value=dummy_string_value)
        self.assertEqual(expected_widget_html, output)
        # A list value must render identically to its serialized string form.
        dummy_list_value = ['staff', 'verified']
        output = self.widget.render(name=input_field_name, value=dummy_list_value)
        self.assertEqual(expected_widget_html, output)
| agpl-3.0 |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/additions/fun/doku_cc.py | 1 | 11442 | # -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt
import Queue
import threading
from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer
from Plugins.Extensions.MediaPortal.resources.menuhelper import MenuHelper
from Plugins.Extensions.MediaPortal.resources.twagenthelper import twAgentGetPage
# Version tag shown in each screen's title; site pages are UTF-8 encoded.
DCC_Version = "Doku.cc v0.95"
DCC_siteEncoding = 'utf-8'
# The module-level string below documents the remote-control key bindings
# (in German): genre menu navigation, page stepping in the documentary list
# (bouquet +/- and digit keys for 1/2/5/10-page jumps), and red/blue keys
# for scrolling the description text.  Kept verbatim (it is a string
# literal, not a comment).
"""
Sondertastenbelegung:
Genre Auswahl:
KeyCancel : Menu Up / Exit
KeyOK : Menu Down / Select
Doku Auswahl:
Bouquet +/- : Seitenweise blättern in 1er Schritten Up/Down
'1', '4', '7',
'3', 6', '9' : blättern in 2er, 5er, 10er Schritten Down/Up
Rot/Blau : Die Beschreibung Seitenweise scrollen
Stream Auswahl:
Rot/Blau : Die Beschreibung Seitenweise scrollen
"""
class show_DCC_Genre(MenuHelper):
    # Top-level genre menu for doku.cc: fixed genre list plus a free-text
    # search entry; selecting a genre opens DCC_FilmListeScreen.
    def __init__(self, session):
        # Each tuple is (display name, URL suffix appended to the base URL).
        genres = [
            ("Neue Top Dokus","/toplist.php"),
            ("Neue Dokus in HD","?high-definition=1"),
            ("Neueste Dokus", ""),
            ("Die besten Dokus", "?die-besten-dokus=1"),
            ("Suche...", "?s=")
        ]
        MenuHelper.__init__(self, session, 0, [genres], "http://doku.cc", "", self._defaultlistcenter)
        self['title'] = Label(DCC_Version)
        self['ContentTitle'] = Label("Genres")
        self.param_qr = ''
        self.onLayoutFinish.append(self.mh_initMenu)

    def mh_parseData(self, data):
        # MenuHelper hook: scrape (href, name) pairs for the genre tags
        # from the downloaded HTML.
        print 'parseData:'
        entrys = []
        for m in re.finditer('class="dokuTag.*?href="(.*?)".*?class="tag.*?>(.*?)</div>', data, re.S):
            href, nm = m.groups()
            entrys.append((href.strip(), nm))
        return entrys

    def mh_callGenreListScreen(self):
        # NOTE(review): the dots in 'Suche...' are regex wildcards here, so
        # this matches any title starting with "Suche"; works because only
        # the search entry does.
        if re.search('Suche...', self.mh_genreTitle):
            self.paraQuery()
        else:
            genreurl = self.mh_genreUrl[0]+self.mh_genreUrl[1]
            self.session.open(DCC_FilmListeScreen, genreurl, self.mh_genreTitle)

    def paraQuery(self):
        # Open the on-screen keyboard to collect a search term.
        self.param_qr = ''
        self.session.openWithCallback(self.cb_paraQuery, VirtualKeyBoardExt, title = (_("Enter search criteria")), text = self.param_qr, is_dialog=True)

    def cb_paraQuery(self, callback = None, entry = None):
        # Keyboard callback: spaces become dashes for the site's search URL.
        if callback != None:
            self.param_qr = callback.strip()
            if len(self.param_qr) > 0:
                qr = self.param_qr.replace(' ','-')
                genreurl = self.mh_genreBase+self.mh_genreUrl[0]+qr
                self.session.open(DCC_FilmListeScreen, genreurl, self.mh_genreTitle)
class DCC_FilmListeScreen(MPScreen, ThumbsHelper):
    # Paginated documentary list for one genre/search: scrapes doku.cc
    # listing pages via Twisted (twAgentGetPage), shows cover thumbnails
    # and descriptions, and plays the embedded YouTube stream on OK.
    # Queues + threading.Events serialize the async page/picture loads.
    def __init__(self, session, genreLink, genreName):
        self.genreLink = genreLink
        self.genreName = genreName
        self.plugin_path = mp_globals.pluginPath
        self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
        # Load the skin XML for the configured skin, falling back to the
        # default skin when it is missing.
        path = "%s/%s/dokuListScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
        if not fileExists(path):
            path = self.skin_path + mp_globals.skinFallback + "/dokuListScreen.xml"
        print path
        with open(path, "r") as f:
            self.skin = f.read()
            f.close()
        MPScreen.__init__(self, session)
        ThumbsHelper.__init__(self)
        self["actions"] = ActionMap(["OkCancelActions", "ShortcutActions", "ColorActions", "SetupActions", "NumberActions", "MenuActions", "EPGSelectActions","DirectionActions"], {
            "ok" : self.keyOK,
            "cancel": self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft,
            "upUp" : self.key_repeatedUp,
            "rightUp" : self.key_repeatedUp,
            "leftUp" : self.key_repeatedUp,
            "downUp" : self.key_repeatedUp,
            "upRepeated" : self.keyUpRepeated,
            "downRepeated" : self.keyDownRepeated,
            "rightRepeated" : self.keyRightRepeated,
            "leftRepeated" : self.keyLeftRepeated,
            "nextBouquet" : self.keyPageUp,
            "prevBouquet" : self.keyPageDown,
            "1" : self.key_1,
            "3" : self.key_3,
            "4" : self.key_4,
            "6" : self.key_6,
            "7" : self.key_7,
            "9" : self.key_9,
            "0" : self.closeAll,
            "blue" : self.keyTxtPageDown,
            "red" : self.keyTxtPageUp
        }, -1)
        self.baseUrl = 'http://doku.cc/'
        self.sortOrder = 0
        self.genreTitle = ""
        self.sortParIMDB = ""
        self.sortParAZ = ""
        self.sortOrderStrAZ = ""
        self.sortOrderStrIMDB = ""
        self.sortOrderStrGenre = ""
        self['title'] = Label(DCC_Version)
        self['F1'] = Label(_("Text-"))
        self['F4'] = Label(_("Text+"))
        self['Page'] = Label(_("Page:"))
        # filmQ: pending listing-page URLs; picQ: pending cover updates.
        self.filmQ = Queue.Queue(0)
        self.hanQ = Queue.Queue(0)
        self.picQ = Queue.Queue(0)
        self.updateP = 0
        # eventL: a page load is in flight; eventP: a picture update is.
        self.eventL = threading.Event()
        self.eventP = threading.Event()
        self.keyLocked = True
        self.dokusListe = []
        self.keckse = CookieJar()
        self.page = 0
        self.pages = 0;
        self.setGenreStrTitle()
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.loadPage)

    def setGenreStrTitle(self):
        genreName = "%s%s" % (self.genreTitle,self.genreName)
        #print genreName
        self['ContentTitle'].setText(genreName)

    def loadPage(self):
        # Build the listing URL for the current page; the site uses
        # different pagination forms depending on the link shape.
        print "loadPage:"
        if not self.page:
            url = self.genreLink
        else:
            page = self.page
            if not self.genreLink or self.genreLink.startswith('?'):
                url = 'page/%d' % page + self.genreLink
            elif self.genreLink.startswith('/'):
                url = self.genreLink + '?page=%d' % page
            else:
                url = self.genreLink + '/page/%d' % page
        if url and url[0] == '/':
            url = self.baseUrl + url[1:]
        elif not url.startswith('http'):
            url = self.baseUrl + url
        if self.page:
            self['page'].setText("%d / %d" % (self.page,self.pages))
        self.filmQ.put(url)
        # Only start a download when none is already running.
        if not self.eventL.is_set():
            self.eventL.set()
            self.loadPageQueued()
        print "eventL ",self.eventL.is_set()

    def loadPageQueued(self):
        print "loadPageQueued:"
        self['name'].setText(_('Please wait...'))
        while not self.filmQ.empty():
            url = self.filmQ.get_nowait()
        print url
        twAgentGetPage(url, cookieJar=self.keckse).addCallback(self.loadPageData).addErrback(self.dataError)

    def dataError(self, error):
        self.eventL.clear()
        print "dataError:"
        printl(error,self,"E")
        self.dokusListe = []
        self.dokusListe.append(("Nichts gefunden!","","",""))
        self.ml.setList(map(self._defaultlistleft, self.dokusListe))

    def loadPageData(self, data):
        # Scrape the listing HTML into (title, url, image, description)
        # tuples and refresh the on-screen list and page counter.
        print "loadPageData:"
        self.dokusListe = []
        a = 0
        l = len(data)
        while a < l:
            mg = re.search('<div id="post-(.*?\.\.\.)', data[a:], re.S)
            if mg:
                a += mg.end()
                m1 = re.search('<a href="(.*?)".*?<img src="(.*?)".*?none;"> (.*?) </a></h2.*? (.*?\.\.\.)', mg.group(1), re.S)
                if m1:
                    desc = decodeHtml(m1.group(4).split('>')[-1].strip())
                    url = self.baseUrl+m1.group(1)
                    if m1.group(2).startswith('/'):
                        img = self.baseUrl+m1.group(2)[1:]
                    else:
                        img = self.baseUrl+m1.group(2)
                    tit = decodeHtml(m1.group(3))
                    self.dokusListe.append((tit, url, img, desc))
            else:
                a = l
        if self.dokusListe:
            print "Dokus found !"
            # Extract the total page count from the pagination markup.
            m = re.search(' Page.*?of (\d+)', data)
            if not m:
                m = re.search('href=".*?page/(\d+)"', data)
            try:
                pages = int(m.group(1))
            except:
                pages = 1
            if pages > self.pages:
                self.pages = pages
            if not self.page:
                self.page = 1
            print "Page: %d / %d" % (self.page,self.pages)
            self['page'].setText("%d / %d" % (self.page,self.pages))
            self.ml.setList(map(self._defaultlistleft, self.dokusListe))
            self.th_ThumbsQuery(self.dokusListe, 0, 1, 2, None, None, self.page, self.pages, mode=1)
            self.loadPicQueued()
        else:
            print "No dokus found!"
            self.dokusListe.append(("Keine Dokus gefunden!","","",""))
            self.ml.setList(map(self._defaultlistleft, self.dokusListe))
        if self.filmQ.empty():
            self.eventL.clear()
        else:
            self.loadPageQueued()

    def loadPic(self):
        # Update name/cover for the current selection; skipped while a
        # previous cover update (updateP) is still running.
        print "loadPic:"
        if self.picQ.empty():
            self.eventP.clear()
            print "picQ is empty"
            return
        if self.updateP:
            print "Pict. or descr. update in progress"
            print "eventP: ",self.eventP.is_set()
            print "updateP: ",self.updateP
            return
        while not self.picQ.empty():
            self.picQ.get_nowait()
        streamName = self['liste'].getCurrent()[0][0]
        self['name'].setText(streamName)
        streamPic = self['liste'].getCurrent()[0][2]
        #print "streamName: ",streamName
        #print "streamPic: ",streamPic
        #print "streamUrl: ",streamUrl
        self.updateP = 1
        CoverHelper(self['coverArt'], self.ShowCoverFileExit).getCover(streamPic)

    def getHandlung(self, desc):
        print "getHandlung:"
        if desc == None:
            print "No Infos found !"
            self['handlung'].setText(_("No further information available!"))
            return
        self.setHandlung(desc)

    def setHandlung(self, data):
        print "setHandlung:"
        self['handlung'].setText(data)

    def ShowCoverFileExit(self):
        # CoverHelper completion callback: unlock keys and resume any
        # queued page or picture work.
        print "showCoverExitFile:"
        self.updateP = 0;
        self.keyLocked = False
        if not self.filmQ.empty():
            self.loadPageQueued()
        else:
            self.eventL.clear()
        self.loadPic()

    def loadPicQueued(self):
        print "loadPicQueued:"
        self.picQ.put(None)
        if not self.eventP.is_set():
            self.eventP.set()
        desc = self['liste'].getCurrent()[0][3]
        self.getHandlung(desc)
        self.loadPic()
        print "eventP: ",self.eventP.is_set()

    def parseStream(self, data):
        # Find the embedded YouTube id in the detail page and start the
        # player; otherwise show a "no stream found" message box.
        print "parseStream:"
        m2 = re.search('//www.youtube.com/(embed|v)/(.*?)(\?|" |&)', data)
        if m2:
            print "Streams found"
            dhVideoId = m2.group(2)
            dhTitle = self['liste'].getCurrent()[0][0]
            self.session.open(
                YoutubePlayer,
                [(dhTitle, dhVideoId, None)],
                showPlaylist=False
                )
        else:
            print "No stream found"
            self.session.open(MessageBoxExt,"Kein Stream gefunden!", MessageBoxExt.TYPE_INFO, timeout=10)

    def keyOK(self):
        # NOTE(review): bitwise `|` used where logical `or` was likely
        # intended; works here because both operands are booleans.
        if (self.keyLocked|self.eventL.is_set()):
            return
        streamLink = self['liste'].getCurrent()[0][1]
        twAgentGetPage(streamLink).addCallback(self.parseStream).addErrback(self.dataError)

    def keyUpRepeated(self):
        #print "keyUpRepeated"
        if self.keyLocked:
            return
        self['liste'].up()

    def keyDownRepeated(self):
        #print "keyDownRepeated"
        if self.keyLocked:
            return
        self['liste'].down()

    def key_repeatedUp(self):
        #print "key_repeatedUp"
        if self.keyLocked:
            return
        self.loadPicQueued()

    def keyLeftRepeated(self):
        if self.keyLocked:
            return
        self['liste'].pageUp()

    def keyRightRepeated(self):
        if self.keyLocked:
            return
        self['liste'].pageDown()

    def keyPageDown(self):
        #print "keyPageDown()"
        self.keyPageDownFast(1)

    def keyPageUp(self):
        #print "keyPageUp()"
        self.keyPageUpFast(1)

    def keyPageUpFast(self,step):
        # Step forward `step` pages, wrapping back to page 1 past the end.
        if self.keyLocked:
            return
        #print "keyPageUpFast: ",step
        oldpage = self.page
        if (self.page + step) <= self.pages:
            self.page += step
        else:
            self.page = 1
        #print "Page %d/%d" % (self.page,self.pages)
        if oldpage != self.page:
            self.loadPage()

    def keyPageDownFast(self,step):
        # Step back `step` pages, wrapping to the last page before page 1.
        if self.keyLocked:
            return
        print "keyPageDownFast: ",step
        oldpage = self.page
        if (self.page - step) >= 1:
            self.page -= step
        else:
            self.page = self.pages
        #print "Page %d/%d" % (self.page,self.pages)
        if oldpage != self.page:
            self.loadPage()

    def key_1(self):
        #print "keyPageDownFast(2)"
        self.keyPageDownFast(2)

    def key_4(self):
        #print "keyPageDownFast(5)"
        self.keyPageDownFast(5)

    def key_7(self):
        #print "keyPageDownFast(10)"
        self.keyPageDownFast(10)

    def key_3(self):
        #print "keyPageUpFast(2)"
        self.keyPageUpFast(2)

    def key_6(self):
        #print "keyPageUpFast(5)"
        self.keyPageUpFast(5)
def key_9(self):
#print "keyPageUpFast(10)"
self.keyPageUpFast(10) | gpl-2.0 |
jaingaurav/ansible | test/units/parsing/test_mod_args.py | 36 | 4933 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.errors import AnsibleParserError
from ansible.compat.tests import unittest
class TestModArgsDwim(unittest.TestCase):
    """Exercise ModuleArgsParser's (module, args, delegate) resolution."""

    # TODO: add tests that construct ModuleArgsParser with a task reference
    # TODO: verify the AnsibleError raised on failure knows the task
    # and the task knows the line numbers
    def setUp(self):
        pass
    def _debug(self, mod, args, to):
        # Diagnostic helper: dump the parser's three return values.
        print("RETURNED module = {0}".format(mod))
        print(" args = {0}".format(args))
        print(" to = {0}".format(to))
    def tearDown(self):
        pass
    def test_basic_shell(self):
        # `shell` is rewritten to the command module with _uses_shell=True.
        m = ModuleArgsParser(dict(shell='echo hi'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'command')
        self.assertEqual(args, dict(
            _raw_params = 'echo hi',
            _uses_shell = True,
        ))
        self.assertIsNone(to)
    def test_basic_command(self):
        m = ModuleArgsParser(dict(command='echo hi'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'command')
        self.assertEqual(args, dict(
            _raw_params = 'echo hi',
        ))
        self.assertIsNone(to)
    def test_shell_with_modifiers(self):
        # creates=/removes= modifiers are lifted out of the raw params.
        m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'command')
        self.assertEqual(args, dict(
            creates = '/tmp/baz',
            removes = '/tmp/bleep',
            _raw_params = '/bin/foo',
            _uses_shell = True,
        ))
        self.assertIsNone(to)
    def test_normal_usage(self):
        # k=v string args are parsed into a dict.
        m = ModuleArgsParser(dict(copy='src=a dest=b'))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_complex_args(self):
        # dict args pass through unchanged.
        m = ModuleArgsParser(dict(copy=dict(src='a', dest='b')))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_action_with_complex(self):
        m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b')))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_action_with_complex_and_complex_args(self):
        m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b'))))
        mod, args, to = m.parse()
        self._debug(mod, args, to)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIsNone(to)
    def test_local_action_string(self):
        # local_action sets the delegate/connection to 'local'.
        m = ModuleArgsParser(dict(local_action='copy src=a dest=b'))
        mod, args, connection = m.parse()
        self._debug(mod, args, connection)
        self.assertEqual(mod, 'copy')
        self.assertEqual(args, dict(src='a', dest='b'))
        self.assertIs(connection, 'local')
    def test_multiple_actions(self):
        # Conflicting action keys in one task must raise a parser error.
        m = ModuleArgsParser(dict(action='shell echo hi', local_action='shell echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
        m = ModuleArgsParser(dict(action='shell echo hi', shell='echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
        m = ModuleArgsParser(dict(local_action='shell echo hi', shell='echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
        m = ModuleArgsParser(dict(ping='data=hi', shell='echo hi'))
        self.assertRaises(AnsibleParserError, m.parse)
| gpl-3.0 |
apache/incubator-airflow | tests/providers/google/cloud/operators/test_cloud_sql.py | 5 | 32641 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
import os
import unittest
from unittest import mock
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.google.cloud.operators.cloud_sql import (
CloudSQLCreateInstanceDatabaseOperator,
CloudSQLCreateInstanceOperator,
CloudSQLDeleteInstanceDatabaseOperator,
CloudSQLDeleteInstanceOperator,
CloudSQLExecuteQueryOperator,
CloudSQLExportInstanceOperator,
CloudSQLImportInstanceOperator,
CloudSQLInstancePatchOperator,
CloudSQLPatchInstanceDatabaseOperator,
)
PROJECT_ID = os.environ.get('PROJECT_ID', 'project-id')
INSTANCE_NAME = os.environ.get('INSTANCE_NAME', 'test-name')
DB_NAME = os.environ.get('DB_NAME', 'db1')
CREATE_BODY = {
"name": INSTANCE_NAME,
"settings": {
"tier": "db-n1-standard-1",
"backupConfiguration": {
"binaryLogEnabled": True,
"enabled": True,
"replicationLogArchivingEnabled": True,
"startTime": "05:00",
},
"activationPolicy": "ALWAYS",
"authorizedGaeApplications": [],
"crashSafeReplicationEnabled": True,
"dataDiskSizeGb": 30,
"dataDiskType": "PD_SSD",
"databaseFlags": [],
"ipConfiguration": {
"ipv4Enabled": True,
"authorizedNetworks": [
{
"value": "192.168.100.0/24",
"name": "network1",
"expirationTime": "2012-11-15T16:19:00.094Z",
},
],
"privateNetwork": "/vpc/resource/link",
"requireSsl": True,
},
"locationPreference": {
"zone": "europe-west4-a",
"followGaeApplication": "/app/engine/application/to/follow",
},
"maintenanceWindow": {"hour": 5, "day": 7, "updateTrack": "canary"},
"pricingPlan": "PER_USE",
"replicationType": "ASYNCHRONOUS",
"storageAutoResize": False,
"storageAutoResizeLimit": 0,
"userLabels": {"my-key": "my-value"},
},
"databaseVersion": "MYSQL_5_7",
"failoverReplica": {"name": "replica-1"},
"masterInstanceName": "master-instance-1",
"onPremisesConfiguration": {},
"region": "europe-west4",
"replicaConfiguration": {
"mysqlReplicaConfiguration": {
"caCertificate": "cert-pem",
"clientCertificate": "cert-pem",
"clientKey": "cert-pem",
"connectRetryInterval": 30,
"dumpFilePath": "/path/to/dump",
"masterHeartbeatPeriod": 100,
"password": "secret_pass",
"sslCipher": "list-of-ciphers",
"username": "user",
"verifyServerCertificate": True,
},
},
}
PATCH_BODY = {
"name": INSTANCE_NAME,
"settings": {"tier": "db-n1-standard-2", "dataDiskType": "PD_HDD"},
"region": "europe-west4",
}
DATABASE_INSERT_BODY = {
"name": DB_NAME, # The name of the database in the Cloud SQL instance.
# This does not include the project ID or instance name.
"project": PROJECT_ID, # The project ID of the project containing the Cloud SQL
# database. The Google apps domain is prefixed if
# applicable.
"instance": INSTANCE_NAME, # The name of the Cloud SQL instance.
# This does not include the project ID.
}
DATABASE_PATCH_BODY = {"charset": "utf16", "collation": "utf16_general_ci"}
EXPORT_BODY = {
"exportContext": {
"fileType": "CSV",
"uri": "gs://bucketName/fileName",
"databases": [],
"sqlExportOptions": {"tables": ["table1", "table2"], "schemaOnly": False},
"csvExportOptions": {"selectQuery": "SELECT * FROM TABLE"},
}
}
IMPORT_BODY = {
"importContext": {
"fileType": "CSV",
"uri": "gs://bucketName/fileName",
"database": "db1",
"importUser": "",
"csvImportOptions": {"table": "my_table", "columns": ["col1", "col2"]},
}
}
class TestCloudSql(unittest.TestCase):
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLCreateInstanceOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_create(self, mock_hook, _check_if_instance_exists):
_check_if_instance_exists.return_value = False
mock_hook.return_value.create_instance.return_value = True
op = CloudSQLCreateInstanceOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=CREATE_BODY, task_id="id"
)
result = op.execute(
context={'task_instance': mock.Mock()} # pylint: disable=assignment-from-no-return
)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.create_instance.assert_called_once_with(
project_id=PROJECT_ID, body=CREATE_BODY
)
self.assertIsNone(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLCreateInstanceOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_create_missing_project_id(self, mock_hook, _check_if_instance_exists):
_check_if_instance_exists.return_value = False
mock_hook.return_value.create_instance.return_value = True
op = CloudSQLCreateInstanceOperator(instance=INSTANCE_NAME, body=CREATE_BODY, task_id="id")
result = op.execute(
context={'task_instance': mock.Mock()} # pylint: disable=assignment-from-no-return
)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.create_instance.assert_called_once_with(project_id=None, body=CREATE_BODY)
self.assertIsNone(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLCreateInstanceOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_create_idempotent(self, mock_hook, _check_if_instance_exists):
_check_if_instance_exists.return_value = True
mock_hook.return_value.create_instance.return_value = True
op = CloudSQLCreateInstanceOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=CREATE_BODY, task_id="id"
)
result = op.execute(
context={'task_instance': mock.Mock()} # pylint: disable=assignment-from-no-return
)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.create_instance.assert_not_called()
self.assertIsNone(result)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_create_should_throw_ex_when_empty_project_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = CloudSQLCreateInstanceOperator(
project_id="", body=CREATE_BODY, instance=INSTANCE_NAME, task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'project_id' is empty", str(err))
mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_create_should_throw_ex_when_empty_body(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = CloudSQLCreateInstanceOperator(
project_id=PROJECT_ID, body={}, instance=INSTANCE_NAME, task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'body' is empty", str(err))
mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_create_should_throw_ex_when_empty_instance(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = CloudSQLCreateInstanceOperator(
project_id=PROJECT_ID, body=CREATE_BODY, instance="", task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'instance' is empty", str(err))
mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_create_should_validate_list_type(self, mock_hook):
wrong_list_type_body = {
"name": INSTANCE_NAME,
"settings": {
"tier": "db-n1-standard-1",
"ipConfiguration": {
"authorizedNetworks": {} # Should be a list, not a dict.
# Testing if the validation catches this.
},
},
}
with self.assertRaises(AirflowException) as cm:
op = CloudSQLCreateInstanceOperator(
project_id=PROJECT_ID, body=wrong_list_type_body, instance=INSTANCE_NAME, task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn(
"The field 'settings.ipConfiguration.authorizedNetworks' "
"should be of list type according to the specification",
str(err),
)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_create_should_validate_non_empty_fields(self, mock_hook):
empty_tier_body = {
"name": INSTANCE_NAME,
"settings": {
"tier": "", # Field can't be empty (defined in CLOUD_SQL_VALIDATION).
# Testing if the validation catches this.
},
}
with self.assertRaises(AirflowException) as cm:
op = CloudSQLCreateInstanceOperator(
project_id=PROJECT_ID, body=empty_tier_body, instance=INSTANCE_NAME, task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn("The body field 'settings.tier' can't be empty. Please provide a value.", str(err))
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_patch(self, mock_hook):
mock_hook.return_value.patch_instance.return_value = True
op = CloudSQLInstancePatchOperator(
project_id=PROJECT_ID, body=PATCH_BODY, instance=INSTANCE_NAME, task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.patch_instance.assert_called_once_with(
project_id=PROJECT_ID, body=PATCH_BODY, instance=INSTANCE_NAME
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_patch_missing_project_id(self, mock_hook):
mock_hook.return_value.patch_instance.return_value = True
op = CloudSQLInstancePatchOperator(body=PATCH_BODY, instance=INSTANCE_NAME, task_id="id")
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.patch_instance.assert_called_once_with(
project_id=None, body=PATCH_BODY, instance=INSTANCE_NAME
)
self.assertTrue(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLInstancePatchOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_patch_should_bubble_up_ex_if_not_exists(self, mock_hook, _check_if_instance_exists):
_check_if_instance_exists.return_value = False
with self.assertRaises(AirflowException) as cm:
op = CloudSQLInstancePatchOperator(
project_id=PROJECT_ID, body=PATCH_BODY, instance=INSTANCE_NAME, task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn('specify another instance to patch', str(err))
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.patch_instance.assert_not_called()
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLDeleteInstanceOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_delete(self, mock_hook, _check_if_instance_exists):
_check_if_instance_exists.return_value = True
op = CloudSQLDeleteInstanceOperator(project_id=PROJECT_ID, instance=INSTANCE_NAME, task_id="id")
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID, instance=INSTANCE_NAME
)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLDeleteInstanceOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_delete_missing_project_id(self, mock_hook, _check_if_instance_exists):
_check_if_instance_exists.return_value = True
op = CloudSQLDeleteInstanceOperator(instance=INSTANCE_NAME, task_id="id")
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=None, instance=INSTANCE_NAME
)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLDeleteInstanceOperator._check_if_instance_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_delete_should_abort_and_succeed_if_not_exists(
self, mock_hook, _check_if_instance_exists
):
_check_if_instance_exists.return_value = False
op = CloudSQLDeleteInstanceOperator(project_id=PROJECT_ID, instance=INSTANCE_NAME, task_id="id")
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.delete_instance.assert_not_called()
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLCreateInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_create(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = False
op = CloudSQLCreateInstanceDatabaseOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=DATABASE_INSERT_BODY, task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.create_database.assert_called_once_with(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=DATABASE_INSERT_BODY
)
self.assertTrue(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLCreateInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_create_missing_project_id(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = False
op = CloudSQLCreateInstanceDatabaseOperator(
instance=INSTANCE_NAME, body=DATABASE_INSERT_BODY, task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.create_database.assert_called_once_with(
project_id=None, instance=INSTANCE_NAME, body=DATABASE_INSERT_BODY
)
self.assertTrue(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLCreateInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_create_should_abort_and_succeed_if_exists(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = True
op = CloudSQLCreateInstanceDatabaseOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=DATABASE_INSERT_BODY, task_id="id"
)
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.create_database.assert_not_called()
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLPatchInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_patch(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = True
op = CloudSQLPatchInstanceDatabaseOperator(
project_id=PROJECT_ID,
instance=INSTANCE_NAME,
database=DB_NAME,
body=DATABASE_PATCH_BODY,
task_id="id",
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.patch_database.assert_called_once_with(
project_id=PROJECT_ID, instance=INSTANCE_NAME, database=DB_NAME, body=DATABASE_PATCH_BODY
)
self.assertTrue(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLPatchInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_patch_missing_project_id(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = True
op = CloudSQLPatchInstanceDatabaseOperator(
instance=INSTANCE_NAME, database=DB_NAME, body=DATABASE_PATCH_BODY, task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.patch_database.assert_called_once_with(
project_id=None, instance=INSTANCE_NAME, database=DB_NAME, body=DATABASE_PATCH_BODY
)
self.assertTrue(result)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLPatchInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_patch_should_throw_ex_if_not_exists(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = False
with self.assertRaises(AirflowException) as cm:
op = CloudSQLPatchInstanceDatabaseOperator(
project_id=PROJECT_ID,
instance=INSTANCE_NAME,
database=DB_NAME,
body=DATABASE_PATCH_BODY,
task_id="id",
)
op.execute(None)
err = cm.exception
self.assertIn("Cloud SQL instance with ID", str(err))
self.assertIn("does not contain database", str(err))
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.patch_database.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_patch_should_throw_ex_when_empty_database(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = CloudSQLPatchInstanceDatabaseOperator(
project_id=PROJECT_ID,
instance=INSTANCE_NAME,
database="",
body=DATABASE_INSERT_BODY,
task_id="id",
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'database' is empty", str(err))
mock_hook.assert_not_called()
mock_hook.return_value.patch_database.assert_not_called()
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLDeleteInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_delete(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = True
op = CloudSQLDeleteInstanceDatabaseOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, database=DB_NAME, task_id="id"
)
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.delete_database.assert_called_once_with(
project_id=PROJECT_ID, instance=INSTANCE_NAME, database=DB_NAME
)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLDeleteInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_delete_missing_project_id(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = True
op = CloudSQLDeleteInstanceDatabaseOperator(instance=INSTANCE_NAME, database=DB_NAME, task_id="id")
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.delete_database.assert_called_once_with(
project_id=None, instance=INSTANCE_NAME, database=DB_NAME
)
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_sql"
".CloudSQLDeleteInstanceDatabaseOperator._check_if_db_exists"
)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_db_delete_should_abort_and_succeed_if_not_exists(self, mock_hook, _check_if_db_exists):
_check_if_db_exists.return_value = False
op = CloudSQLDeleteInstanceDatabaseOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, database=DB_NAME, task_id="id"
)
result = op.execute(None)
self.assertTrue(result)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.delete_database.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_export(self, mock_hook):
mock_hook.return_value.export_instance.return_value = True
op = CloudSQLExportInstanceOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=EXPORT_BODY, task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.export_instance.assert_called_once_with(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=EXPORT_BODY
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_export_missing_project_id(self, mock_hook):
mock_hook.return_value.export_instance.return_value = True
op = CloudSQLExportInstanceOperator(instance=INSTANCE_NAME, body=EXPORT_BODY, task_id="id")
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.export_instance.assert_called_once_with(
project_id=None, instance=INSTANCE_NAME, body=EXPORT_BODY
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_import(self, mock_hook):
mock_hook.return_value.export_instance.return_value = True
op = CloudSQLImportInstanceOperator(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=IMPORT_BODY, task_id="id"
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.import_instance.assert_called_once_with(
project_id=PROJECT_ID, instance=INSTANCE_NAME, body=IMPORT_BODY
)
self.assertTrue(result)
@mock.patch("airflow.providers.google.cloud.operators.cloud_sql.CloudSQLHook")
def test_instance_import_missing_project_id(self, mock_hook):
mock_hook.return_value.export_instance.return_value = True
op = CloudSQLImportInstanceOperator(instance=INSTANCE_NAME, body=IMPORT_BODY, task_id="id")
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1beta4",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.import_instance.assert_called_once_with(
project_id=None, instance=INSTANCE_NAME, body=IMPORT_BODY
)
self.assertTrue(result)
class TestCloudSqlQueryValidation(unittest.TestCase):
@staticmethod
def _setup_connections(get_connection, uri):
gcp_connection = mock.MagicMock()
gcp_connection.extra_dejson = mock.MagicMock()
gcp_connection.extra_dejson.get.return_value = 'empty_project'
cloudsql_connection = Connection(uri=uri)
cloudsql_connection2 = Connection(uri=uri)
get_connection.side_effect = [gcp_connection, cloudsql_connection, cloudsql_connection2]
@parameterized.expand(
[
(
'project_id',
'',
'instance_name',
'mysql',
False,
False,
'SELECT * FROM TEST',
"The required extra 'location' is empty",
),
(
'project_id',
'location',
'',
'postgres',
False,
False,
'SELECT * FROM TEST',
"The required extra 'instance' is empty",
),
(
'project_id',
'location',
'instance_name',
'wrong',
False,
False,
'SELECT * FROM TEST',
"Invalid database type 'wrong'. Must be one of ['postgres', 'mysql']",
),
(
'project_id',
'location',
'instance_name',
'postgres',
True,
True,
'SELECT * FROM TEST',
"Cloud SQL Proxy does not support SSL connections. SSL is not needed as"
" Cloud SQL Proxy provides encryption on its own",
),
(
'project_id',
'location',
'instance_name',
'postgres',
False,
True,
'SELECT * FROM TEST',
"SSL connections requires sslcert to be set",
),
]
)
@mock.patch("airflow.hooks.base.BaseHook.get_connection")
def test_create_operator_with_wrong_parameters(
self,
project_id,
location,
instance_name,
database_type,
use_proxy,
use_ssl,
sql,
message,
get_connection,
):
uri = (
"gcpcloudsql://user:password@127.0.0.1:3200/testdb?"
"database_type={database_type}&"
"project_id={project_id}&location={location}&instance={instance_name}&"
"use_proxy={use_proxy}&use_ssl={use_ssl}".format(
database_type=database_type,
project_id=project_id,
location=location,
instance_name=instance_name,
use_proxy=use_proxy,
use_ssl=use_ssl,
)
)
self._setup_connections(get_connection, uri)
with self.assertRaises(AirflowException) as cm:
op = CloudSQLExecuteQueryOperator(sql=sql, task_id='task_id')
op.execute(None)
err = cm.exception
self.assertIn(message, str(err))
@mock.patch("airflow.hooks.base.BaseHook.get_connection")
def test_create_operator_with_too_long_unix_socket_path(self, get_connection):
uri = (
"gcpcloudsql://user:password@127.0.0.1:3200/testdb?database_type=postgres&"
"project_id=example-project&location=europe-west1&"
"instance="
"test_db_with_long_name_a_bit_above"
"_the_limit_of_UNIX_socket_asdadadasadasd&"
"use_proxy=True&sql_proxy_use_tcp=False"
)
self._setup_connections(get_connection, uri)
operator = CloudSQLExecuteQueryOperator(sql=['SELECT * FROM TABLE'], task_id='task_id')
with self.assertRaises(AirflowException) as cm:
operator.execute(None)
err = cm.exception
self.assertIn("The UNIX socket path length cannot exceed", str(err))
| apache-2.0 |
sumedhasingla/VTK | ThirdParty/ZopeInterface/zope/interface/common/sequence.py | 52 | 4679 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Sequence Interfaces
"""
__docformat__ = 'restructuredtext'
from zope import interface
class IMinimalSequence(interface.Interface):
"""Most basic sequence interface.
All sequences are iterable. This requires at least one of the
following:
- a `__getitem__()` method that takes a single argument; interger
values starting at 0 must be supported, and `IndexError` should
be raised for the first index for which there is no value, or
- an `__iter__()` method that returns an iterator as defined in
the Python documentation (http://docs.python.org/lib/typeiter.html).
"""
def __getitem__(index):
"""`x.__getitem__(index)` <==> `x[index]`
Declaring this interface does not specify whether `__getitem__`
supports slice objects."""
class IFiniteSequence(IMinimalSequence):
def __len__():
"""`x.__len__()` <==> `len(x)`"""
class IReadSequence(IFiniteSequence):
"""read interface shared by tuple and list"""
def __contains__(item):
"""`x.__contains__(item)` <==> `item in x`"""
def __lt__(other):
"""`x.__lt__(other)` <==> `x < other`"""
def __le__(other):
"""`x.__le__(other)` <==> `x <= other`"""
def __eq__(other):
"""`x.__eq__(other)` <==> `x == other`"""
def __ne__(other):
"""`x.__ne__(other)` <==> `x != other`"""
def __gt__(other):
"""`x.__gt__(other)` <==> `x > other`"""
def __ge__(other):
"""`x.__ge__(other)` <==> `x >= other`"""
def __add__(other):
"""`x.__add__(other)` <==> `x + other`"""
def __mul__(n):
"""`x.__mul__(n)` <==> `x * n`"""
def __rmul__(n):
"""`x.__rmul__(n)` <==> `n * x`"""
def __getslice__(i, j):
"""`x.__getslice__(i, j)` <==> `x[i:j]`
Use of negative indices is not supported.
Deprecated since Python 2.0 but still a part of `UserList`.
"""
class IExtendedReadSequence(IReadSequence):
"""Full read interface for lists"""
def count(item):
"""Return number of occurrences of value"""
def index(item, *args):
"""Return first index of value
`L.index(value, [start, [stop]])` -> integer"""
class IUniqueMemberWriteSequence(interface.Interface):
"""The write contract for a sequence that may enforce unique members"""
def __setitem__(index, item):
"""`x.__setitem__(index, item)` <==> `x[index] = item`
Declaring this interface does not specify whether `__setitem__`
supports slice objects.
"""
def __delitem__(index):
"""`x.__delitem__(index)` <==> `del x[index]`
Declaring this interface does not specify whether `__delitem__`
supports slice objects.
"""
def __setslice__(i, j, other):
"""`x.__setslice__(i, j, other)` <==> `x[i:j]=other`
Use of negative indices is not supported.
Deprecated since Python 2.0 but still a part of `UserList`.
"""
def __delslice__(i, j):
"""`x.__delslice__(i, j)` <==> `del x[i:j]`
Use of negative indices is not supported.
Deprecated since Python 2.0 but still a part of `UserList`.
"""
def __iadd__(y):
"""`x.__iadd__(y)` <==> `x += y`"""
def append(item):
"""Append item to end"""
def insert(index, item):
"""Insert item before index"""
def pop(index=-1):
"""Remove and return item at index (default last)"""
def remove(item):
"""Remove first occurrence of value"""
def reverse():
"""Reverse *IN PLACE*"""
def sort(cmpfunc=None):
"""Stable sort *IN PLACE*; `cmpfunc(x, y)` -> -1, 0, 1"""
def extend(iterable):
"""Extend list by appending elements from the iterable"""
class IWriteSequence(IUniqueMemberWriteSequence):
"""Full write contract for sequences"""
def __imul__(n):
"""`x.__imul__(n)` <==> `x *= n`"""
class ISequence(IReadSequence, IWriteSequence):
"""Full sequence contract"""
| bsd-3-clause |
fractal-mind/portfolio | node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 388 | 91069 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last beween different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
# Expansions the make generator supplies for gyp's generator variables.
# Most expand to GNU make variables/functions that are only evaluated at
# build time (e.g. $(obj), $<); the RULE_INPUT_* entries are %-substituted
# by Python instead.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Request sorted dependencies in the order from dependents to dependencies.
# (Flipped to True by CalculateGeneratorInputInfo for Android NDK builds.)
generator_wants_sorted_dependencies = False

# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    product_dir = generator_default_variables['PRODUCT_DIR']
    # Mac-specific defaults; shared libraries live next to the products.
    for key, value in (('OS', 'mac'),
                       ('SHARED_LIB_SUFFIX', '.dylib'),
                       ('SHARED_LIB_DIR', product_dir),
                       ('LIB_DIR', product_dir)):
      default_variables.setdefault(key, value)

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(
        xcode_generator, 'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(
        xcode_generator, 'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(
        xcode_generator, 'generator_extra_sources_for_rules', [])
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    # Keep this legacy behavior for now: Android reports itself as linux.
    operating_system = 'linux' if flavor == 'android' else flavor
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('android_ndk_version', None):
    # Android NDK requires a strict link order, so ask for sorted deps.
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True

  options = params['options']
  output_dir = options.generator_output or options.toplevel_dir
  builddir_name = flags.get('output_dir', 'out')
  qualified_out_dir = os.path.normpath(
      os.path.join(output_dir, builddir_name, 'gypfiles'))

  global generator_filelist_paths
  generator_filelist_paths = {
    'toplevel': options.toplevel_dir,
    'qualified_out_dir': qualified_out_dir,
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Emit the top-level suffix rules for every compilable extension.

  Writes three groups of %.o pattern rules to `writer`: for sources in
  $(srcdir), and for generated sources in $(obj).$(TOOLSET) and $(obj).
  """
  exts = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  def _emit_group(pattern):
    # `pattern` has %% for a literal % and %s for the extension.
    for ext in exts:
      writer.write(pattern % ext)
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])

  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  _emit_group('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n')

  writer.write('\n# Try building from generated source, too.\n')
  _emit_group('$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n')
  writer.write('\n')
  _emit_group('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n')
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS).

  A file is compilable when it ends with one of the extensions in
  COMPILABLE_EXTENSIONS.
  """
  # any() over the known extensions replaces the original manual
  # generator-plus-early-return loop; behavior is identical.
  return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
  """Return true if the file is linkable (should be on the link line)."""
  # Only already-compiled object files are linkable inputs.
  return filename[-2:] == '.o'
def Target(filename):
  """Translate a compilable filename to its .o target."""
  base, _unused_ext = os.path.splitext(filename)
  return base + '.o'
def EscapeShellArgument(s):
  """Quotes an argument so that it will be interpreted literally by a POSIX
  shell. Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Close the single-quoted string, emit an escaped quote, then reopen it.
  body = s.replace("'", "'\\''")
  return "'%s'" % body
def EscapeMakeVariableExpansion(s):
  """Make has its own variable expansion syntax using $. We must escape it for
  string to be interpreted literally."""
  # '$$' is make's spelling of a literal '$'.
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Shell-quote first, then protect make's '$' expansion.
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  # '#' characters must be escaped even embedded in a string, else Make will
  # treat it as the start of a comment.
  return escaped.replace('#', r'\#')
def QuoteIfNecessary(string):
  """Wrap `string` in double quotes (escaping embedded ones) when it
  contains a double quote; otherwise return it unchanged.

  TODO: Should this ideally be replaced with one or more of the escape
  functions above?
  """
  if '"' not in string:
    return string
  return '"%s"' % string.replace('"', '\\"')
def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  # Anything outside [A-Za-z0-9_] becomes an underscore.
  unsafe_chars = re.compile('[^a-zA-Z0-9_]')
  return unsafe_chars.sub('_', string)
# Prefix that Sourceify() (below) prepends to relative paths.  Empty here;
# presumably reassigned elsewhere (e.g. to '$(srcdir)/') when the Makefiles
# are generated -- TODO confirm.
srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form."""
  # Make-variable references and absolute paths pass through untouched;
  # everything else is made relative to the source dir.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Return `s` with every space replaced by `quote` (default: escaped space)."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
# Filled in by MakefileWriter.Write() as each target's .mk is generated.
target_outputs = {}

# Map from qualified target to any linkable output.  A subset
# of target_outputs.  E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
# Also filled in by MakefileWriter.Write().
target_link_deps = {}
class MakefileWriter(object):
  """MakefileWriter packages up the writing of one target-specific foobar.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')
    self.fp.write(header)
    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']
    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None
    deps, link_deps = self.ComputeDeps(spec)
    # Some of the generation below can add extra output, sources, or
    # link dependencies.  All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []
    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)
    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    # Installable targets (and standalone static libraries) are referred to by
    # a short alias (their basename) rather than their full output path.
    if (self.is_standalone_static_library or
        self.type in self._INSTALLABLE_TARGETS):
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output
    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)
    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)
    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)
    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)
    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      if self.flavor == 'mac':
        # libtool on OS X generates warnings for duplicate basenames in the same
        # target.
        _ValidateSourcesForOSX(spec, all_sources)
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        # Only emit suffix rules for extensions this target actually uses.
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')
    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)
    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)
    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path
    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary
    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
    self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    Arguments:
      actions: list of action dicts from the gyp input.
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     actions (used to make other pieces dependent on these
                     actions)
      extra_mac_bundle_resources: list filled in with outputs flagged as mac
                     bundle resources.
      part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      # One make command (cmd_<name>) is emitted per action.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']
      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs
      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                           for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      if len(dirs) > 0:
        # Create the output directories up front, before the action runs.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
      cd_action = 'cd %s; ' % Sourceify(self.path or '.')
      # command and cd_action get written to a toplevel variable called
      # cmd_foo.  Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)
      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      outputs = map(self.Absolutify, outputs)
      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir.  This replaces the obj
      # variable for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output
      # (:1); it's superfluous for the "extra outputs", and this avoids
      # accidentally writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)
      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)
      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()
    self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not supports '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
  def WriteMacInfoPlist(self, bundle_deps):
    """Write Makefile code for bundle Info.plist files.

    When the plist uses cpp-style defines, first emits a rule that
    preprocesses it into an intermediate file; then writes the Xcode
    environment the plist may reference and a copy-info-plist command.
    Appends the copied plist path to bundle_deps.
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        lambda p: Sourceify(self.Absolutify(p)))
    # No Info.plist configured for this target: nothing to emit.
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
          os.path.basename(info_plist))
      self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
          quoter=EscapeCppDefine)
      self.WriteMakeRule([intermediate_plist], [info_plist],
          ['$(call do_cmd,infoplist)',
           # "Convert" the plist so that any weird whitespace changes from the
           # preprocessor do not affect the XML parser in mac_tool.
           '@plutil -convert xml1 $@ $@'])
      # From here on, the preprocessed file is the plist we copy.
      info_plist = intermediate_plist
    # plists can contain envvars and substitute them into the file.
    self.WriteSortedXcodeEnv(
        out, self.GetSortedXcodeEnv(additional_settings=extra_env))
    self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                    part_of_all=True)
    bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
    # Emit an order-only rule so the extra outputs (action/rule products)
    # are built before this target's binary.
    # NOTE(review): the |target| parameter is unused -- the rule is always
    # written against self.output_binary regardless of what callers pass
    # (some pass self.output).  Confirm intent before making it take effect.
    self.WriteMakeRule([self.output_binary], extra_outputs,
                       comment = 'Build our special outputs first.',
                       order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not already after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), expliclity
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have an do-nothing recipe.
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:');
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
  def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
    """Write a set of LOCAL_XXX definitions for Android NDK.

    These variable definitions will be used by Android NDK but do nothing for
    non-Android applications.

    Arguments:
      module_name: Android NDK module name, which must be unique among all
        module names.
      all_sources: A list of source files (will be filtered by Compilable).
      link_deps: A list of link dependencies, which must be sorted in
        the order from dependencies to dependents.
    """
    # Only these target types map onto NDK build modules.
    if self.type not in ('executable', 'shared_library', 'static_library'):
      return

    self.WriteLn('# Variable definitions for Android applications')
    self.WriteLn('include $(CLEAR_VARS)')
    self.WriteLn('LOCAL_MODULE := ' + module_name)
    self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
                 '$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both of C and C++. There is
                 # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
                 # sources.
                 '$(CFLAGS_C_$(BUILDTYPE)) '
                 # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
                 # LOCAL_C_INCLUDES does not expect it. So put it in
                 # LOCAL_CFLAGS.
                 '$(INCS_$(BUILDTYPE))')
    # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
    self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
    self.WriteLn('LOCAL_C_INCLUDES :=')
    self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

    # Detect the C++ extension: pick the extension used by the most sources.
    cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
    default_cpp_ext = '.cpp'
    for filename in all_sources:
      ext = os.path.splitext(filename)[1]
      if ext in cpp_ext:
        cpp_ext[ext] += 1
        if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
          default_cpp_ext = ext
    self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

    self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                   'LOCAL_SRC_FILES')

    # Filter out those which do not match prefix and suffix and produce
    # the resulting list without prefix and suffix.
    # NOTE(review): if |suffix| were ever empty, filename[len(prefix):-0]
    # would yield '' -- suffixes here are '.so'/'.a' in practice, so this
    # does not trigger; confirm before reusing with other inputs.
    def DepsToModules(deps, prefix, suffix):
      modules = []
      for filepath in deps:
        filename = os.path.basename(filepath)
        if filename.startswith(prefix) and filename.endswith(suffix):
          modules.append(filename[len(prefix):-len(suffix)])
      return modules

    # Retrieve the default value of 'SHARED_LIB_SUFFIX'
    params = {'flavor': 'linux'}
    default_variables = {}
    CalculateVariables(default_variables, params)

    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['SHARED_LIB_PREFIX'],
                      default_variables['SHARED_LIB_SUFFIX']),
        'LOCAL_SHARED_LIBRARIES')
    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['STATIC_LIB_PREFIX'],
                      generator_default_variables['STATIC_LIB_SUFFIX']),
        'LOCAL_STATIC_LIBRARIES')

    # Hand off to the NDK build script matching this target's type.
    if self.type == 'executable':
      self.WriteLn('include $(BUILD_EXECUTABLE)')
    elif self.type == 'shared_library':
      self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
    elif self.type == 'static_library':
      self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
    self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
  """Return the sorted Xcode environment for this target.

  Delegates to gyp.xcode_emulation with make-level variable references
  ($(abs_builddir), $(abs_srcdir), $(BUILDTYPE)) that the generated
  makefile expands at build time.
  """
  return gyp.xcode_emulation.GetSortedXcodeEnv(
      self.xcode_settings, "$(abs_builddir)",
      os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
      additional_settings)
def GetSortedXcodePostbuildEnv(self):
  """Return the sorted Xcode env for postbuilds, with CHROMIUM_STRIP_SAVE_FILE
  always present."""
  # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
  # TODO(thakis): It would be nice to have some general mechanism instead.
  strip_save_file = self.xcode_settings.GetPerTargetSetting(
      'CHROMIUM_STRIP_SAVE_FILE', '')
  # Even if strip_save_file is empty, explicitly write it. Else a postbuild
  # might pick up an export from an earlier target.
  return self.GetSortedXcodeEnv(
      additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
  """Write one 'target: export KEY := value' make line per (key, value) pair
  in *env* (an already-sorted sequence of pairs)."""
  for k, v in env:
    # For
    #   foo := a\ b
    # the escaped space does the right thing. For
    #   export foo := a\ b
    # it does not -- the backslash is written to the env as literal character.
    # So don't escape spaces in |env[k]|.
    self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
  """Convert a path to its output directory form.

  Plain paths are placed under the per-toolset object directory; paths that
  already reference $(obj) are rewritten to the toolset-qualified form.
  """
  obj_prefix = '$(obj).%s/$(TARGET)/' % self.toolset
  if '$(' not in path:
    # No make variables at all, so no '$(obj)' either: just prefix.
    return obj_prefix + path
  rewritten = path.replace('$(obj)/', obj_prefix)
  if '$(obj)' not in rewritten:
    rewritten = obj_prefix + rewritten
  return rewritten
def Pchify(self, path, lang):
  """Convert a prefix header path to its output directory form.

  The result lives under a per-toolset, per-language 'pch-<lang>' directory.
  """
  path = self.Absolutify(path)
  pch_dir = '$(obj).%s/$(TARGET)/pch-%s' % (self.toolset, lang)
  if '$(' in path:
    # Path already references make variables: rewrite any '$(obj)/' prefix.
    return path.replace('$(obj)/', pch_dir)
  return '%s/%s' % (pch_dir, path)
def Absolutify(self, path):
  """Convert a subdirectory-relative path into a base-relative path.

  Paths containing make variables are passed through (minus any trailing
  slashes) rather than normalized.
  """
  if '$(' not in path:
    return os.path.normpath(os.path.join(self.path, path))
  # Don't call normpath here, as it might collapse the path too
  # aggressively if it features '..'. However it's still important to
  # strip trailing slashes.
  return path.rstrip('/')
def ExpandInputRoot(self, template, expansion, dirname):
  """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in *template*.

  Templates without either placeholder are returned unchanged.
  """
  has_placeholder = ('%(INPUT_ROOT)s' in template or
                     '%(INPUT_DIRNAME)s' in template)
  if not has_placeholder:
    return template
  return template % {'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname}
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile.

  Emits a make rule that re-runs gyp (with the flags it was originally
  invoked with) whenever any of the input .gyp/.gypi files change.
  """
  options = params['options']
  # Arguments are made relative to the top level so the regen command works
  # from $(srcdir).
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  # Ensure a relative gyp binary is invoked as ./path so the shell finds it.
  if not gyp_binary.startswith(os.sep):
    gyp_binary = os.path.join('.', gyp_binary)
  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': ' '.join(map(Sourceify, build_files)),
          'cmd': gyp.common.EncodePOSIXShellList(
                     [gyp_binary, '-fmake'] +
                     gyp.RegenerateFlags(options) +
                     build_files_args)})
def PerformBuild(data, configurations, params):
  """Run 'make' once for each requested configuration.

  Args:
    data: parsed gyp data (unused here, part of the generator interface).
    configurations: iterable of configuration names (e.g. 'Debug').
    params: generator params; params['options'].toplevel_dir selects the
        directory to build in via make's -C flag.

  Raises:
    subprocess.CalledProcessError: if any make invocation fails.
  """
  options = params['options']
  for config in configurations:
    arguments = ['make']
    if options.toplevel_dir and options.toplevel_dir != '.':
      # Build from the top-level directory when it isn't the cwd.
      arguments.extend(['-C', options.toplevel_dir])
    arguments.append('BUILDTYPE=' + config)
    # Parenthesized print works identically on Python 2 and 3; the original
    # used the Python-2-only print statement.
    print('Building [%s]: %s' % (config, arguments))
    subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate the root Makefile plus one sub-makefile per target.

  This is the main entry point of the make generator: it writes the shared
  header, per-target .mk files (via MakefileWriter), per-gyp sub-makefiles,
  the sorted include list, and the auto-regeneration rule.
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  # Default (linux) header parameters; per-flavor branches below override.
  flock_command = 'flock'
  copy_archive_arguments = '-af'
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
      'copy_archive_args': copy_archive_arguments,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'openbsd':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
    })
  elif flavor == 'aix':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })

  # Toolchain commands, overridable from the environment.
  header_params.update({
      'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
      'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
      'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
      'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
      'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
      'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
      'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
      'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
  })

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  # Collect FOO_wrapper entries first so they can be prepended to FOO below.
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += ' %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.
  header_params['make_global_settings'] = make_global_settings

  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
    WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    # NOTE(review): the message formats make_global_settings (the rendered
    # string) rather than make_global_settings_array -- looks like a minor
    # upstream bug in the error text only; the comparison itself is correct.
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        " $(findstring $(join ^,$(prefix)),\\\n"
        " $(join ^," + include_file + ")))),)\n")
    root_makefile.write(" include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| mit |
saurabh6790/OFF-RISAPP | accounts/doctype/journal_voucher/test_journal_voucher.py | 30 | 6500 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import webnotes
class TestJournalVoucher(unittest.TestCase):
    """Integration tests for Journal Voucher posting, against_jv linkage and
    the company's monthly/yearly budget enforcement flags.

    These tests hit the live webnotes/Frappe database connection
    (webnotes.conn) and the test_records fixtures defined at module level.
    """

    def test_journal_voucher_with_against_jv(self):
        # Submitting a payment JV linked via against_jv must record the link;
        # cancelling the invoice JV must remove it again.
        self.clear_account_balance()
        jv_invoice = webnotes.bean(copy=test_records[2])
        jv_invoice.insert()
        jv_invoice.submit()

        # No detail rows reference the invoice before the payment is made.
        self.assertTrue(not webnotes.conn.sql("""select name from `tabJournal Voucher Detail`
            where against_jv=%s""", jv_invoice.doc.name))

        jv_payment = webnotes.bean(copy=test_records[0])
        jv_payment.doclist[1].against_jv = jv_invoice.doc.name
        jv_payment.insert()
        jv_payment.submit()

        self.assertTrue(webnotes.conn.sql("""select name from `tabJournal Voucher Detail`
            where against_jv=%s""", jv_invoice.doc.name))

        self.assertTrue(webnotes.conn.sql("""select name from `tabJournal Voucher Detail`
            where against_jv=%s and credit=400""", jv_invoice.doc.name))

        # cancel jv_invoice
        jv_invoice.cancel()

        self.assertTrue(not webnotes.conn.sql("""select name from `tabJournal Voucher Detail`
            where against_jv=%s""", jv_invoice.doc.name))

    def test_jv_against_stock_account(self):
        # With perpetual inventory enabled, posting a JV directly against a
        # stock (warehouse) account must be rejected.
        from stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
        set_perpetual_inventory()

        jv = webnotes.bean(copy=test_records[0])
        jv.doclist[1].account = "_Test Warehouse - _TC"
        jv.insert()

        from accounts.general_ledger import StockAccountInvalidTransaction
        self.assertRaises(StockAccountInvalidTransaction, jv.submit)

        set_perpetual_inventory(0)

    def test_monthly_budget_crossed_ignore(self):
        # With the flag set to Ignore, a voucher over the monthly budget is
        # still posted and GL entries are created.
        webnotes.conn.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
        self.clear_account_balance()

        jv = webnotes.bean(copy=test_records[0])
        jv.doclist[2].account = "_Test Account Cost for Goods Sold - _TC"
        jv.doclist[2].cost_center = "_Test Cost Center - _TC"
        jv.doclist[2].debit = 20000.0
        jv.doclist[1].credit = 20000.0
        jv.insert()
        jv.submit()

        self.assertTrue(webnotes.conn.get_value("GL Entry",
            {"voucher_type": "Journal Voucher", "voucher_no": jv.doc.name}))

    def test_monthly_budget_crossed_stop(self):
        # With the flag set to Stop, submission must raise BudgetError.
        from accounts.utils import BudgetError
        webnotes.conn.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
        self.clear_account_balance()

        jv = webnotes.bean(copy=test_records[0])
        jv.doclist[2].account = "_Test Account Cost for Goods Sold - _TC"
        jv.doclist[2].cost_center = "_Test Cost Center - _TC"
        jv.doclist[2].debit = 20000.0
        jv.doclist[1].credit = 20000.0
        jv.insert()

        self.assertRaises(BudgetError, jv.submit)

        # Restore the default so later tests are unaffected.
        webnotes.conn.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")

    def test_yearly_budget_crossed_stop(self):
        # Seed 20000 of expense first, then a 150000 voucher later in the
        # fiscal year must trip the yearly budget check.
        from accounts.utils import BudgetError
        self.clear_account_balance()
        self.test_monthly_budget_crossed_ignore()
        webnotes.conn.set_value("Company", "_Test Company", "yearly_bgt_flag", "Stop")

        jv = webnotes.bean(copy=test_records[0])
        jv.doc.posting_date = "2013-08-12"
        jv.doclist[2].account = "_Test Account Cost for Goods Sold - _TC"
        jv.doclist[2].cost_center = "_Test Cost Center - _TC"
        jv.doclist[2].debit = 150000.0
        jv.doclist[1].credit = 150000.0
        jv.insert()

        self.assertRaises(BudgetError, jv.submit)

        webnotes.conn.set_value("Company", "_Test Company", "yearly_bgt_flag", "Ignore")

    def test_monthly_budget_on_cancellation(self):
        # Cancelling an income-side voucher must also respect the budget:
        # removing the 30000 credit would push expenses over budget again.
        from accounts.utils import BudgetError
        webnotes.conn.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
        self.clear_account_balance()

        jv = webnotes.bean(copy=test_records[0])
        jv.doclist[1].account = "_Test Account Cost for Goods Sold - _TC"
        jv.doclist[1].cost_center = "_Test Cost Center - _TC"
        jv.doclist[1].credit = 30000.0
        jv.doclist[2].debit = 30000.0
        # NOTE(review): no jv.insert() before submit here, unlike the other
        # tests -- confirm whether bean.submit() implies insert.
        jv.submit()

        self.assertTrue(webnotes.conn.get_value("GL Entry",
            {"voucher_type": "Journal Voucher", "voucher_no": jv.doc.name}))

        jv1 = webnotes.bean(copy=test_records[0])
        jv1.doclist[2].account = "_Test Account Cost for Goods Sold - _TC"
        jv1.doclist[2].cost_center = "_Test Cost Center - _TC"
        jv1.doclist[2].debit = 40000.0
        jv1.doclist[1].credit = 40000.0
        jv1.submit()

        self.assertTrue(webnotes.conn.get_value("GL Entry",
            {"voucher_type": "Journal Voucher", "voucher_no": jv1.doc.name}))

        self.assertRaises(BudgetError, jv.cancel)

        webnotes.conn.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")

    def clear_account_balance(self):
        # Wipe the general ledger so each test starts from a zero balance.
        webnotes.conn.sql("""delete from `tabGL Entry`""")
# Fixture data used by TestJournalVoucher above. Each record is a doclist:
# a Journal Voucher header followed by its two detail rows.
#   [0] bank receipt: credit customer 400, debit bank 400
#   [1] bank payment: debit supplier 400, credit bank 400
#   [2] sales entry:  debit customer 400, credit sales 400
test_records = [
    [{
        "company": "_Test Company",
        "doctype": "Journal Voucher",
        "fiscal_year": "_Test Fiscal Year 2013",
        "naming_series": "_T-Journal Voucher-",
        "posting_date": "2013-02-14",
        "user_remark": "test",
        "voucher_type": "Bank Voucher",
        "cheque_no": "33",
        "cheque_date": "2013-02-14"
    },
    {
        "account": "_Test Customer - _TC",
        "doctype": "Journal Voucher Detail",
        "credit": 400.0,
        "debit": 0.0,
        "parentfield": "entries"
    },
    {
        "account": "_Test Account Bank Account - _TC",
        "doctype": "Journal Voucher Detail",
        "debit": 400.0,
        "credit": 0.0,
        "parentfield": "entries"
    }],
    [{
        "company": "_Test Company",
        "doctype": "Journal Voucher",
        "fiscal_year": "_Test Fiscal Year 2013",
        "naming_series": "_T-Journal Voucher-",
        "posting_date": "2013-02-14",
        "user_remark": "test",
        "voucher_type": "Bank Voucher",
        "cheque_no": "33",
        "cheque_date": "2013-02-14"
    },
    {
        "account": "_Test Supplier - _TC",
        "doctype": "Journal Voucher Detail",
        "credit": 0.0,
        "debit": 400.0,
        "parentfield": "entries"
    },
    {
        "account": "_Test Account Bank Account - _TC",
        "doctype": "Journal Voucher Detail",
        "debit": 0.0,
        "credit": 400.0,
        "parentfield": "entries"
    }],
    [{
        "company": "_Test Company",
        "doctype": "Journal Voucher",
        "fiscal_year": "_Test Fiscal Year 2013",
        "naming_series": "_T-Journal Voucher-",
        "posting_date": "2013-02-14",
        "user_remark": "test",
        "voucher_type": "Bank Voucher",
        "cheque_no": "33",
        "cheque_date": "2013-02-14"
    },
    {
        "account": "_Test Customer - _TC",
        "doctype": "Journal Voucher Detail",
        "credit": 0.0,
        "debit": 400.0,
        "parentfield": "entries"
    },
    {
        "account": "Sales - _TC",
        "doctype": "Journal Voucher Detail",
        "credit": 400.0,
        "debit": 0.0,
        "parentfield": "entries",
        "cost_center": "_Test Cost Center - _TC"
    }],
]
thiagomagero/namebench | nb_third_party/dns/rdtypes/ANY/CNAME.py | 248 | 1092 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class CNAME(dns.rdtypes.nsbase.NSBase):
    """CNAME record

    Note: although CNAME is officially a singleton type, dnspython allows
    non-singleton CNAME rdatasets because such sets have been commonly
    used by BIND and other nameservers for load balancing."""

    # All wire/text parsing and formatting for a single target name is
    # inherited unchanged from NSBase.
    pass
| apache-2.0 |
ivano666/tensorflow | tensorflow/python/training/learning_rate_decay.py | 3 | 6424 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import control_flow_ops
def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False, name=None):
  """Applies exponential decay to the learning rate.

  Computes:

  ```python
  decayed_learning_rate = learning_rate *
                          decay_rate ^ (global_step / decay_steps)
  ```

  Pass a TensorFlow variable that you increment at each training step as
  `global_step`. If `staircase` is `True`, the exponent is floored so the
  rate decays at discrete intervals rather than continuously.

  Example, decaying every 100000 steps with a base of 0.96:

  ```python
  global_step = tf.Variable(0, trainable=False)
  learning_rate = tf.train.exponential_decay(0.1, global_step,
                                             100000, 0.96, staircase=True)
  # Passing global_step to minimize() will increment it at each step.
  ```

  Args:
    learning_rate: A scalar `float32`/`float64` `Tensor` or Python number.
      The initial learning rate.
    global_step: A scalar `int32`/`int64` `Tensor` or Python number. Global
      step for the decay computation. Must not be negative.
    decay_steps: A scalar `int32`/`int64` `Tensor` or Python number. Must be
      positive. See the decay computation above.
    decay_rate: A scalar `float32`/`float64` `Tensor` or Python number. The
      decay rate.
    staircase: Boolean. If `True` decay the rate at discrete intervals.
    name: String. Optional name of the operation. Defaults to
      'ExponentialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`: the decayed
    learning rate.
  """
  with ops.op_scope([learning_rate, global_step, decay_steps, decay_rate],
                    name, "ExponentialDecay") as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    # Cast every operand to the learning rate's dtype before mixing them.
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    exponent = global_step / decay_steps
    if staircase:
      # Floor the exponent so the rate only changes every decay_steps steps.
      exponent = math_ops.floor(exponent)
    decayed = math_ops.pow(decay_rate, exponent)
    return math_ops.mul(learning_rate, decayed, name=name)
def piecewise_constant(x, boundaries, values, name=None):
  """ Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100000 steps, 0.5
    for steps 100001 to 110000, and 0.1 for any additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or float`s or `int`s that specifies the values
      for the intervals defined by `boundaries`. It should have one more element
      than `boundaries`, and all elements should have the same type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and values[-1] when `x > boundaries[-1]`.
  """
  with ops.op_scope([x, boundaries, values, name],
                    name, 'PiecewiseConstant') as name:
    x = ops.convert_to_tensor(x)
    # Avoid explicit conversion to x's dtype. This could result in faulty
    # comparisons, for example if floats are converted to integers.
    boundaries = ops.convert_n_to_tensor(boundaries)
    if not all(b.dtype == x.dtype for b in boundaries):
      raise ValueError('boundaries must have the same dtype as x.')
    # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
    values = ops.convert_n_to_tensor(values)
    if not all(v.dtype == values[0].dtype for v in values):
      raise ValueError('values must have elements all with the same dtype.')

    # Build one (predicate -> value callable) pair per interval; tf.case
    # picks the branch whose predicate is true at run time.
    pred_fn_pairs = {}
    pred_fn_pairs[x <= boundaries[0]] = lambda: values[0]
    pred_fn_pairs[x > boundaries[-1]] = lambda: values[-1]
    for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
      # Need to bind v here; can do this with lambda v=v: ...
      pred = (x > low) & (x <= high)
      pred_fn_pairs[pred] = lambda v=v: v

    # The default isn't needed here because our conditions are mutually
    # exclusive and exhaustive, but tf.case requires it.
    default = lambda: values[0]
    return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
| apache-2.0 |
dgjustice/ansible | lib/ansible/playbook/handler_task_include.py | 97 | 1346 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
#from ansible.inventory.host import Host
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler import Handler
class HandlerTaskInclude(Handler, TaskInclude):
    """A task include that lives in a handlers section: it behaves like a
    Handler (notified/flushed) while loading its data like a TaskInclude."""

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        # Construct the object with its structural context, then let the
        # base playbook machinery parse the raw data into it.
        t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
        return t.load_data(data, variable_manager=variable_manager, loader=loader)
| gpl-3.0 |
martzjd/freebefore30 | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/manni.py | 364 | 2374 | # -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """

    background_color = '#f0f3f3'

    # Mapping of token type -> style definition string (colors, bold/italic,
    # borders, backgrounds) consumed by the Pygments formatter.
    styles = {
        Whitespace:         '#bbbbbb',
        Comment:            'italic #0099FF',
        Comment.Preproc:    'noitalic #009999',
        Comment.Special:    'bold',

        Keyword:            'bold #006699',
        Keyword.Pseudo:     'nobold',
        Keyword.Type:       '#007788',

        Operator:           '#555555',
        Operator.Word:      'bold #000000',

        Name.Builtin:       '#336666',
        Name.Function:      '#CC00FF',
        Name.Class:         'bold #00AA88',
        Name.Namespace:     'bold #00CCFF',
        Name.Exception:     'bold #CC0000',
        Name.Variable:      '#003333',
        Name.Constant:      '#336600',
        Name.Label:         '#9999FF',
        Name.Entity:        'bold #999999',
        Name.Attribute:     '#330099',
        Name.Tag:           'bold #330099',
        Name.Decorator:     '#9999FF',

        String:             '#CC3300',
        String.Doc:         'italic',
        String.Interpol:    '#AA0000',
        String.Escape:      'bold #CC3300',
        String.Regex:       '#33AAAA',
        String.Symbol:      '#FFCC33',
        String.Other:       '#CC3300',

        Number:             '#FF6600',

        Generic.Heading:    'bold #003300',
        Generic.Subheading: 'bold #003300',
        Generic.Deleted:    'border:#CC0000 bg:#FFCCCC',
        Generic.Inserted:   'border:#00CC00 bg:#CCFFCC',
        Generic.Error:      '#FF0000',
        Generic.Emph:       'italic',
        Generic.Strong:     'bold',
        Generic.Prompt:     'bold #000099',
        Generic.Output:     '#AAAAAA',
        Generic.Traceback:  '#99CC66',

        Error:              'bg:#FFAAAA #AA0000'
    }
| mit |
jggatc/pyjsdl | app.py | 1 | 5061 | #!/usr/bin/env python
#Pyjsdl - Copyright (C) 2013
#Released under the MIT License
"""
Pyjsdl App
Script launches HTML app on desktop using Gtk/Webkit.
Copy app script to the application root and optionally rename.
Run the script once to create an ini file and edit to configure.
Tested under Linux Gnome desktop with the installed packages:
gir1.2-webkit2-4.0, python-gi (py2), python3-gi (py3).
On other OS, additional installation steps may be required.
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('WebKit2', '4.0')
from gi.repository import Gtk, WebKit2
import multiprocessing
import os.path
import sys
if sys.version_info.major >= 3:
from socketserver import TCPServer
from http.server import SimpleHTTPRequestHandler
else:
from SocketServer import TCPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class Server(TCPServer):
    """HTTP server that serves the current directory from a child process,
    so the Gtk main loop in the parent stays responsive."""

    # Allow immediate rebinding of the port after a restart.
    allow_reuse_address = True

    def __init__(self, port):
        # Bind to all interfaces on the given port, serving files via the
        # stock SimpleHTTPRequestHandler.
        TCPServer.__init__(self, ("", port), SimpleHTTPRequestHandler)
        self.process = multiprocessing.Process(target=self.serve_forever)

    def initiate(self):
        # Daemonize so the server process dies with the main application.
        self.process.daemon = True
        self.process.start()

    def terminate(self):
        # Forcibly stop the child server process.
        self.process.terminate()
class QuietServer(Server):
    """Same as Server but with per-request logging suppressed via
    QuietHandler."""

    def __init__(self, port):
        TCPServer.__init__(self, ("", port), QuietHandler)
        self.process = multiprocessing.Process(target=self.serve_forever)
class QuietHandler(SimpleHTTPRequestHandler):
    """Request handler that discards access-log output."""

    def log_message(self, format, *args):
        # Intentionally silent: suppress the default stderr request log.
        pass
class App(object):
    """Desktop window hosting the HTML app in a WebKit view, backed by a
    local HTTP server."""

    def __init__(self, config):
        self.config = config
        self.window = Gtk.Window()
        # Pad the window slightly beyond the app's canvas size.
        self.window.resize(self.config.width+16,self.config.height+16)
        if self.config.app_name is not None:
            self.window.set_title(self.config.app_name)
        else:
            # Derive a title from the app file name, e.g. 'output/app.html'
            # -> 'App'.
            title = self.config.app_uri.split('/')[-1].split('.')[0]
            self.window.set_title(title.capitalize())
        self.window.connect('destroy', Gtk.main_quit)
        self.web = None
        self.server = None

    def webview_setup(self):
        # Point the WebKit view at the local HTTP server's app URI.
        self.web = WebKit2.WebView()
        uri = 'http://%s:%d/%s' % (self.config.server_ip,
                                   self.config.server_port,
                                   self.config.app_uri)
        self.web.load_uri(uri)
        self.window.add(self.web)

    def webview(self):
        # Build the view, show the window, and block in the Gtk main loop.
        self.webview_setup()
        self.window.show_all()
        Gtk.main()

    def server_enable(self):
        # Start the background HTTP server once; choose the logging or quiet
        # variant based on configuration.
        if not self.server:
            if self.config.server_log:
                self.server = Server(self.config.server_port)
            else:
                self.server = QuietServer(self.config.server_port)
            self.server.initiate()

    def server_disable(self):
        if self.server:
            self.server.terminate()
class Config(object):
    """Configuration loaded from an ini-style file next to the script.

    The file name is derived from the script name (e.g. ``app.py`` ->
    ``app.ini``).  If the file does not exist it is created with default
    settings and the program exits so the user can edit it.  Each
    setting line has the form ``key value``; lines that do not start
    with a letter are ignored.
    """

    def __init__(self):
        # Defaults, used for any setting absent from the ini file.
        self.server_ip = 'localhost'
        self.server_port = 8000
        self.server_log = False
        self.app_uri = None
        self.app_name = None
        self.width = 500
        self.height = 500
        # Derive the ini name from the script path.  os.path.splitext is
        # used instead of str.split('.') so a path such as './app.py'
        # does not collapse to the hidden file '.ini'.
        self.config_name = os.path.splitext(sys.argv[0])[0] + '.ini'
        if os.path.exists(self.config_name):
            cfg_setting = self.read_ini()
        else:
            self.create_ini()
            print('Enter configuration info in %s.' % self.config_name)
            sys.exit()
        # Apply whichever settings the file provides over the defaults.
        for setting in cfg_setting:
            if setting == 'app_uri':
                self.app_uri = cfg_setting['app_uri'].strip()
            if setting == 'app_name':
                self.app_name = cfg_setting['app_name'].strip()
            if setting == 'window_width':
                self.width = int(cfg_setting['window_width'].strip())
            if setting == 'window_height':
                self.height = int(cfg_setting['window_height'].strip())
            if setting == 'server_ip':
                self.server_ip = cfg_setting['server_ip'].strip()
            if setting == 'server_port':
                self.server_port = int(cfg_setting['server_port'].strip())
            if setting == 'server_log':
                server_log = cfg_setting['server_log'].strip().lower()
                self.server_log = {'true': True, 'false': False}[server_log]

    def create_ini(self):
        """Write a template ini file populated with the default settings."""
        f = open(self.config_name, 'w')
        f.write('#App Configuration\n\n')
        f.write('app_uri output/app.html\n\n')
        f.write('app_name App\n\n')
        f.write('window_width 500\n\n')
        f.write('window_height 500\n\n')
        f.write('server_ip localhost\n\n')
        f.write('server_port 8000\n\n')
        f.write('server_log false\n\n')
        f.close()

    def read_ini(self):
        """Parse the ini file into a ``{key: value}`` dict.

        Only lines beginning with a letter are considered.  A key with
        no value (no space on the line) is skipped instead of crashing
        the ``dict()`` conversion as it previously did.
        """
        cfg_file = open(self.config_name)
        try:
            pairs = [ln.strip().split(' ', 1) for ln in cfg_file
                     if ln[:1].isalpha()]
        finally:
            cfg_file.close()
        # Drop malformed entries (key without a value).
        return dict(pair for pair in pairs if len(pair) == 2)
def main():
    """Load configuration, start the file server, and show the app."""
    config = Config()
    app = App(config)
    app.server_enable()
    # Blocks in the Gtk main loop until the window is closed.
    app.webview()
    app.server_disable()


if __name__ == '__main__':
    main()
| mit |
ajayuranakar/django-blog | lib/python2.7/site-packages/django/contrib/gis/geos/base.py | 437 | 1280 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
class GEOSBase(object):
    """
    Base class for GEOS-backed objects.  All access to the underlying C
    pointer goes through the ``ptr`` property so that a NULL pointer can
    never be handed to a GEOS library routine.
    """

    # The pointer starts out NULL until a geometry is created.
    _ptr = None

    # Pointers assigned through ``ptr`` must be instances of this type.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Refuse to return a NULL pointer -- passing NULL pointers to
        # the C routines would be very bad.
        ptr = self._ptr
        if not ptr:
            raise GEOSException(
                'NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return ptr

    def _set_ptr(self, ptr):
        # Accept only None (NULL) or a pointer of the compatible type.
        if ptr is not None and not isinstance(ptr, self.ptr_type):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr

    # Controls access to the GEOS object pointer; reading it raises a
    # GEOSException when the pointer is NULL, preventing the C library
    # from touching an invalid memory location.
    ptr = property(_get_ptr, _set_ptr)
| gpl-3.0 |
olitheolix/qtmacs | doc/source/code_snippets/tut_5_1.py | 1 | 2630 | import qtmacs.qte_global as qte_global
from qtmacs.base_applet import QtmacsApplet
from qtmacs.base_macro import QtmacsMacro
from PyQt4.Qsci import QsciScintilla, QsciScintillaBase, QsciLexerPython
# Get a reference to the main instance of Qtmacs.
qteMain = qte_global.qteMain
class Scintilla(QtmacsApplet):
    """Qtmacs applet that embeds a QsciScintilla editor showing this file.

    Registers the ``SelfInsert`` macro and binds it to the alphanumeric
    keys so that typing inserts characters into the editor.
    """

    def __init__(self, appletID):
        # Initialise the base classes.
        super().__init__(appletID)
        # Instantiate and register a Scintilla widget.
        self.qteScintilla = self.qteAddWidget(QsciScintilla(self))
        # Load this very file and display it inside the Scintilla widget.
        tmp = ''.join(open(__file__).readlines())
        self.qteScintilla.setText(tmp)
        # Change the lexer to Python to activate syntax highlighting,
        # and enable code folding.
        self.qteScintilla.setLexer(QsciLexerPython())
        self.qteScintilla.setFolding(QsciScintilla.BoxedTreeFoldStyle)
        # Register the self-insert macro.
        name = self.qteMain.qteRegisterMacro(SelfInsert)
        # Bind it the standard alphanumerical keys.
        alpha_keys = 'abcdefghijklmnopqrstuvwxyz'
        alpha_keys += alpha_keys.upper() + '0123456789'
        for ch in alpha_keys:
            self.qteMain.qteBindKeyWidget(ch, name, self.qteScintilla)
class SelfInsert(QtmacsMacro):
    """
    Insert the last typed character.

    The ``last_key_sequence`` variable is overwritten/updated every
    time the event handler in Qtmacs receives a new keyboard event.

    |Signature|

    * *applet*: '*'
    * *widget*: ``QsciScintilla``
    """

    def __init__(self):
        super().__init__()
        # Declare compatibility: any applet, but only Scintilla widgets.
        self.qteSetAppletSignature('*')
        self.qteSetWidgetSignature('QsciScintilla')

    def qteRun(self):
        """Insert the character of the last key event at the caret."""
        # Extract the last QKeyEvent from the keyboard sequence (there
        # should only be one anyway, but just to be sure). Then
        # extract the human readable text this key represents.
        keys = qte_global.last_key_sequence.toQKeyEventList()[-1]
        ch = keys.text()
        # Determine the current cursor position inside the Scintilla
        # widget (it is organised in lines and columns, not as a stream
        # like eg. QTextEdit), insert the character, and manually move
        # the caret forward.
        line, idx = self.qteWidget.getCursorPosition()
        self.qteWidget.insertAt(ch, line, idx)
        self.qteWidget.setCursorPosition(line, idx + 1)
# Register the applet class with Qtmacs, create an instance of it, and
# give the new instance input focus.
app_name = qteMain.qteRegisterApplet(Scintilla)
app_obj = qteMain.qteNewApplet(app_name)
qteMain.qteMakeAppletActive(app_obj)
| gpl-3.0 |
imsplitbit/nova | nova/tests/api/openstack/compute/plugins/v3/test_consoles.py | 1 | 10510 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid as stdlib_uuid
import webob
from nova.api.openstack.compute.plugins.v3 import consoles
from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
    """In-memory stand-in for the nova DB instance lookups used in tests."""

    def __init__(self):
        # id -> instance dict, uuid -> id, and the highest id handed out.
        self.instances_by_id = {}
        self.ids_by_uuid = {}
        self.max_id = 0

    def return_server_by_id(self, context, id):
        """Return a copy of the instance with ``id``, fabricating it lazily."""
        if id not in self.instances_by_id:
            self._add_server(id=id)
        return dict(self.instances_by_id[id])

    def return_server_by_uuid(self, context, uuid):
        """Return a copy of the instance with ``uuid``, fabricating it lazily."""
        if uuid not in self.ids_by_uuid:
            self._add_server(uuid=uuid)
        return dict(self.instances_by_id[self.ids_by_uuid[uuid]])

    def _add_server(self, id=None, uuid=None):
        # Fill in whichever identifier was not supplied, then record the
        # stubbed instance under both keys.
        if id is None:
            id = self.max_id + 1
        if uuid is None:
            uuid = str(stdlib_uuid.uuid4())
        self.instances_by_id[id] = stub_instance(id, uuid=uuid)
        self.ids_by_uuid[uuid] = id
        self.max_id = max(self.max_id, id)
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):
    """Build a fake instance dict mimicking a nova DB instance record.

    Only ``id`` is required; everything else defaults to harmless fake
    values so tests can override just the fields they care about.
    """
    if host is not None:
        host = str(host)
    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''
    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )
    # Shape mirrors the columns tests expect from the instances table.
    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_password": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": timeutils.utcnow(),
        "terminated_at": timeutils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}
    return instance
class ConsolesControllerTest(test.NoDBTestCase):
    """Unit tests for the v3 consoles API controller.

    Each test stubs the console API layer (``console.api.API``) so the
    controller is exercised against predictable fakes rather than a real
    console service or database.
    """

    def setUp(self):
        super(ConsolesControllerTest, self).setUp()
        self.flags(verbose=True)
        # Route DB instance lookups to the in-memory fake above.
        self.instance_db = FakeInstanceDB()
        self.stubs.Set(db, 'instance_get',
                       self.instance_db.return_server_by_id)
        self.stubs.Set(db, 'instance_get_by_uuid',
                       self.instance_db.return_server_by_uuid)
        self.uuid = str(stdlib_uuid.uuid4())
        self.url = '/v3/fake/servers/%s/consoles' % self.uuid
        self.controller = consoles.ConsolesController()

    def test_create_console(self):
        """create() returns 201 and passes the instance uuid through."""
        def fake_create_console(cons_self, context, instance_id):
            self.assertEqual(instance_id, self.uuid)
            return {}
        self.stubs.Set(console.api.API, 'create_console', fake_create_console)
        req = fakes.HTTPRequestV3.blank(self.url)
        self.controller.create(req, self.uuid)
        self.assertEqual(self.controller.create.wsgi_code, 201)

    def test_create_console_unknown_instance(self):
        """create() maps InstanceNotFound to HTTP 404."""
        def fake_create_console(cons_self, context, instance_id):
            raise exception.InstanceNotFound(instance_id=instance_id)
        self.stubs.Set(console.api.API, 'create_console', fake_create_console)
        req = fakes.HTTPRequestV3.blank(self.url)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, self.uuid)

    def test_show_console(self):
        """show() formats the console dict returned by the API layer."""
        def fake_get_console(cons_self, context, instance_id, console_id):
            self.assertEqual(instance_id, self.uuid)
            self.assertEqual(console_id, 20)
            pool = dict(console_type='fake_type',
                    public_hostname='fake_hostname')
            return dict(id=console_id, password='fake_password',
                    port='fake_port', pool=pool, instance_name='inst-0001')
        expected = {'console': {'id': 20,
                                'port': 'fake_port',
                                'host': 'fake_hostname',
                                'password': 'fake_password',
                                'instance_name': 'inst-0001',
                                'console_type': 'fake_type'}}
        self.stubs.Set(console.api.API, 'get_console', fake_get_console)
        req = fakes.HTTPRequestV3.blank(self.url + '/20')
        res_dict = self.controller.show(req, self.uuid, '20')
        self.assertThat(res_dict, matchers.DictMatches(expected))

    def test_show_console_unknown_console(self):
        """show() maps ConsoleNotFound to HTTP 404."""
        def fake_get_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFound(console_id=console_id)
        self.stubs.Set(console.api.API, 'get_console', fake_get_console)
        req = fakes.HTTPRequestV3.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, self.uuid, '20')

    def test_show_console_unknown_instance(self):
        """show() maps ConsoleNotFoundForInstance to HTTP 404."""
        def fake_get_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFoundForInstance(
                instance_uuid=instance_id)
        self.stubs.Set(console.api.API, 'get_console', fake_get_console)
        req = fakes.HTTPRequestV3.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, self.uuid, '20')

    def test_list_consoles(self):
        """index() returns only id and console_type for each console."""
        def fake_get_consoles(cons_self, context, instance_id):
            self.assertEqual(instance_id, self.uuid)
            pool1 = dict(console_type='fake_type',
                    public_hostname='fake_hostname')
            cons1 = dict(id=10, password='fake_password',
                    port='fake_port', pool=pool1)
            pool2 = dict(console_type='fake_type2',
                    public_hostname='fake_hostname2')
            cons2 = dict(id=11, password='fake_password2',
                    port='fake_port2', pool=pool2)
            return [cons1, cons2]
        expected = {'consoles':
                [{'id': 10, 'console_type': 'fake_type'},
                 {'id': 11, 'console_type': 'fake_type2'}]}
        self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
        req = fakes.HTTPRequestV3.blank(self.url)
        res_dict = self.controller.index(req, self.uuid)
        self.assertThat(res_dict, matchers.DictMatches(expected))

    def test_list_consoles_unknown_instance(self):
        """index() maps InstanceNotFound to HTTP 404."""
        def fake_get_consoles(cons_self, context, instance_id):
            raise exception.InstanceNotFound(instance_id=instance_id)
        self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
        req = fakes.HTTPRequestV3.blank(self.url)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
                          req, self.uuid)

    def test_delete_console(self):
        """delete() succeeds when the console exists."""
        def fake_get_console(cons_self, context, instance_id, console_id):
            self.assertEqual(instance_id, self.uuid)
            self.assertEqual(console_id, 20)
            pool = dict(console_type='fake_type',
                    public_hostname='fake_hostname')
            return dict(id=console_id, password='fake_password',
                    port='fake_port', pool=pool)
        def fake_delete_console(cons_self, context, instance_id, console_id):
            self.assertEqual(instance_id, self.uuid)
            self.assertEqual(console_id, 20)
        self.stubs.Set(console.api.API, 'get_console', fake_get_console)
        self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
        req = fakes.HTTPRequestV3.blank(self.url + '/20')
        self.controller.delete(req, self.uuid, '20')

    def test_delete_console_unknown_console(self):
        """delete() maps ConsoleNotFound to HTTP 404."""
        def fake_delete_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFound(console_id=console_id)
        self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
        req = fakes.HTTPRequestV3.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, self.uuid, '20')

    def test_delete_console_unknown_instance(self):
        """delete() maps ConsoleNotFoundForInstance to HTTP 404."""
        def fake_delete_console(cons_self, context, instance_id, console_id):
            raise exception.ConsoleNotFoundForInstance(
                instance_uuid=instance_id)
        self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
        req = fakes.HTTPRequestV3.blank(self.url + '/20')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, self.uuid, '20')
| apache-2.0 |
jonnybazookatone/adsws | adsws/api/views.py | 3 | 1265 | from adsws.modules.oauth2server.provider import oauth2
from flask.ext.restful import Resource
from adsws.core import user_manipulator
from flask import current_app, request, abort
class ProtectedView(Resource):
    """
    This view is oauth2-authentication protected.
    """

    # Applied to every HTTP method on this resource.
    decorators = [oauth2.require_oauth()]

    def get(self):
        """Return the app name and the authenticated user's email."""
        return {
            'app': current_app.name,
            'user': request.oauth.user.email
        }, 200
class StatusView(Resource):
    """
    Returns the status of this app (unauthenticated health check).
    """

    def get(self):
        return {
            'app': current_app.name,
            'status': 'online'
        }, 200
class UserResolver(Resource):
    """
    Resolves an email or uid into a string formatted user object.
    """

    # Internal-only endpoint: requires the adsws:internal OAuth2 scope.
    decorators = [oauth2.require_oauth('adsws:internal')]

    def get(self, identifier):
        """
        :param identifier: email address or uid
        :return: json containing user info or 404
        """
        # EAFP: treat the identifier as a numeric uid first and fall
        # back to an email lookup when it is not an integer.
        try:
            u = user_manipulator.get(int(identifier))
        except ValueError:
            u = user_manipulator.find(email=identifier).first()
        if u is None:
            abort(404)
        return {
            "id": u.id,
            "email": u.email,
        } | gpl-2.0 |
esmadja/digitalcoin | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind over HTTP basic auth."""

    # Request id counter; += below creates a per-instance attribute that
    # shadows this class attribute after the first call.
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its result, error object, or None."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Shortcut for the 'getblockcount' RPC."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Shortcut for the 'getwork' RPC (fetch work, or submit if data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of a 32-bit value (endianness swap)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
        (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of a buffer; length must be a multiple of 4."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in a buffer (bytes within words kept)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """One mining worker: fetches work, scans nonces, submits solutions."""

    def __init__(self, id):
        # id is only used to label hashmeter output.
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for the given work unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce or None if the scan range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one work unit, scan it, and tune the scan range to 'scantime'."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Resize the nonce range so one pass takes about 'scantime' seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the RPC endpoint from the settings dict."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Entry point for one mining worker process."""
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the config file into the module-level settings dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Spawn one worker process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
farhaanbukhsh/sympy | sympy/geometry/tests/test_plane.py | 36 | 7702 | from __future__ import division
from sympy import (Abs, I, Dummy, Rational, Float, S, Symbol, cos, oo, pi,
simplify, sin, sqrt, symbols, Derivative, asin, acos)
from sympy.geometry import (Circle, Curve, Ellipse, GeometryError, Line, Point,
Polygon, Ray, RegularPolygon, Segment, Triangle,
are_similar, convex_hull, intersection,
Point3D, Line3D, Ray3D, Segment3D, Plane, centroid)
from sympy.geometry.util import are_coplanar
from sympy.utilities.pytest import raises, slow
# Real-valued symbols and constants shared by the tests below.
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
t = Symbol('t', real=True)
k = Symbol('k', real=True)
x1 = Symbol('x1', real=True)
x2 = Symbol('x2', real=True)
x3 = Symbol('x3', real=True)
y1 = Symbol('y1', real=True)
y2 = Symbol('y2', real=True)
y3 = Symbol('y3', real=True)
z1 = Symbol('z1', real=True)
z2 = Symbol('z2', real=True)
z3 = Symbol('z3', real=True)
half = Rational(1, 2)
def feq(a, b):
    """Test if two floating point values are 'equal'.

    Equal here means within an absolute tolerance of 1e-10.
    """
    return abs(a - b) < Float("1.0E-10")
@slow
def test_plane():
    """Exercise Plane construction, projections, distances, angles, and
    intersections.  (Fixed: the parallel-plane distance check below was a
    bare comparison expression whose result was discarded; it is now an
    assert.)"""
    p1 = Point3D(0, 0, 0)
    p2 = Point3D(1, 1, 1)
    p3 = Point3D(1, 2, 3)
    p4 = Point3D(x, x, x)
    p5 = Point3D(y, y, y)
    pl3 = Plane(p1, p2, p3)
    pl4 = Plane(p1, normal_vector=(1, 1, 1))
    pl4b = Plane(p1, p2)
    pl5 = Plane(p3, normal_vector=(1, 2, 3))
    pl6 = Plane(Point3D(2, 3, 7), normal_vector=(2, 2, 2))
    pl7 = Plane(Point3D(1, -5, -6), normal_vector=(1, -2, 1))
    l1 = Line3D(Point3D(5, 0, 0), Point3D(1, -1, 1))
    l2 = Line3D(Point3D(0, -2, 0), Point3D(3, 1, 1))
    l3 = Line3D(Point3D(0, -1, 0), Point3D(5, -1, 9))
    assert Plane(p1, p2, p3) != Plane(p1, p3, p2)
    assert Plane(p1, p2, p3).is_coplanar(Plane(p1, p3, p2))
    assert pl3 == Plane(Point3D(0, 0, 0), normal_vector=(1, -2, 1))
    assert pl3 != pl4
    assert pl4 == pl4b
    assert pl5 == Plane(Point3D(1, 2, 3), normal_vector=(1, 2, 3))
    assert pl5.equation(x, y, z) == x + 2*y + 3*z - 14
    assert pl3.equation(x, y, z) == x - 2*y + z
    assert pl3.p1 == p1
    assert pl4.p1 == p1
    assert pl5.p1 == p3
    assert pl4.normal_vector == (1, 1, 1)
    assert pl5.normal_vector == (1, 2, 3)
    assert p1 in pl3
    assert p1 in pl4
    assert p3 in pl5
    assert pl3.projection(Point(0, 0)) == p1
    p = pl3.projection(Point3D(1, 1, 0))
    assert p == Point3D(7/6, 2/3, 1/6)
    assert p in pl3
    l = pl3.projection_line(Line(Point(0, 0), Point(1, 1)))
    assert l == Line3D(Point3D(0, 0, 0), Point3D(7/6, 2/3, 1/6))
    assert l in pl3
    # get a segment that does not intersect the plane which is also
    # parallel to pl3's normal vector
    t = Dummy()
    r = pl3.random_point()
    a = pl3.perpendicular_line(r).arbitrary_point(t)
    s = Segment3D(a.subs(t, 1), a.subs(t, 2))
    assert s.p1 not in pl3 and s.p2 not in pl3
    assert pl3.projection_line(s).equals(r)
    assert pl3.projection_line(Segment(Point(1, 0), Point(1, 1))) == \
               Segment3D(Point3D(5/6, 1/3, -1/6), Point3D(7/6, 2/3, 1/6))
    assert pl6.projection_line(Ray(Point(1, 0), Point(1, 1))) == \
               Ray3D(Point3D(14/3, 11/3, 11/3), Point3D(13/3, 13/3, 10/3))
    assert pl3.perpendicular_line(r.args) == pl3.perpendicular_line(r)
    assert pl3.is_parallel(pl6) is False
    assert pl4.is_parallel(pl6)
    assert pl6.is_parallel(l1) is False
    assert pl3.is_perpendicular(pl6)
    assert pl4.is_perpendicular(pl7)
    assert pl6.is_perpendicular(pl7)
    assert pl6.is_perpendicular(l1) is False
    assert pl7.distance(Point3D(1, 3, 5)) == 5*sqrt(6)/6
    assert pl6.distance(Point3D(0, 0, 0)) == 4*sqrt(3)
    assert pl6.distance(pl6.p1) == 0
    assert pl7.distance(pl6) == 0
    assert pl7.distance(l1) == 0
    assert pl6.distance(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == 0
    # Parallel planes x+y+z=12 and x+y+z=15 are 3/sqrt(3) = sqrt(3) apart.
    # (Previously a bare expression -- the comparison was never asserted.)
    assert pl6.distance(Plane(Point3D(5, 5, 5), normal_vector=(8, 8, 8))) == sqrt(3)
    assert pl6.angle_between(pl3) == pi/2
    assert pl6.angle_between(pl6) == 0
    assert pl6.angle_between(pl4) == 0
    assert pl7.angle_between(Line3D(Point3D(2, 3, 5), Point3D(2, 4, 6))) == \
        -asin(sqrt(3)/6)
    assert pl6.angle_between(Ray3D(Point3D(2, 4, 1), Point3D(6, 5, 3))) == \
        asin(sqrt(7)/3)
    assert pl7.angle_between(Segment3D(Point3D(5, 6, 1), Point3D(1, 2, 4))) == \
        -asin(7*sqrt(246)/246)
    assert are_coplanar(l1, l2, l3) is False
    assert are_coplanar(l1) is False
    assert are_coplanar(Point3D(2, 7, 2), Point3D(0, 0, 2),
        Point3D(1, 1, 2), Point3D(1, 2, 2))
    assert are_coplanar(Plane(p1, p2, p3), Plane(p1, p3, p2))
    assert Plane.are_concurrent(pl3, pl4, pl5) is False
    assert Plane.are_concurrent(pl6) is False
    raises(ValueError, lambda: Plane.are_concurrent(Point3D(0, 0, 0)))
    assert pl3.parallel_plane(Point3D(1, 2, 5)) == Plane(Point3D(1, 2, 5), \
                                                      normal_vector=(1, -2, 1))
    # perpendicular_plane
    p = Plane((0, 0, 0), (1, 0, 0))
    # default
    assert p.perpendicular_plane() == Plane(Point3D(0, 0, 0), (0, 1, 0))
    # 1 pt
    assert p.perpendicular_plane(Point3D(1, 0, 1)) == \
        Plane(Point3D(1, 0, 1), (0, 1, 0))
    # pts as tuples
    assert p.perpendicular_plane((1, 0, 1), (1, 1, 1)) == \
        Plane(Point3D(1, 0, 1), (0, 0, -1))
    a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
    Z = (0, 0, 1)
    p = Plane(a, normal_vector=Z)
    # case 4
    assert p.perpendicular_plane(a, b) == Plane(a, (1, 0, 0))
    n = Point3D(*Z)
    # case 1
    assert p.perpendicular_plane(a, n) == Plane(a, (-1, 0, 0))
    # case 2
    assert Plane(a, normal_vector=b.args).perpendicular_plane(a, a + b) == \
        Plane(Point3D(0, 0, 0), (1, 0, 0))
    # case 1&3
    assert Plane(b, normal_vector=Z).perpendicular_plane(b, b + n) == \
        Plane(Point3D(0, 1, 0), (-1, 0, 0))
    # case 2&3
    assert Plane(b, normal_vector=b.args).perpendicular_plane(n, n + b) == \
        Plane(Point3D(0, 0, 1), (1, 0, 0))
    assert pl6.intersection(pl6) == [pl6]
    assert pl4.intersection(pl4.p1) == [pl4.p1]
    assert pl3.intersection(pl6) == [
        Line3D(Point3D(8, 4, 0), Point3D(2, 4, 6))]
    assert pl3.intersection(Line3D(Point3D(1,2,4), Point3D(4,4,2))) == [
        Point3D(2, 8/3, 10/3)]
    assert pl3.intersection(Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
        ) == [Line3D(Point3D(-24, -12, 0), Point3D(-25, -13, -1))]
    assert pl6.intersection(Ray3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
        Point3D(-1, 3, 10)]
    assert pl6.intersection(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
        Point3D(-1, 3, 10)]
    assert pl7.intersection(Line(Point(2, 3), Point(4, 2))) == [
        Point3D(13/2, 3/4, 0)]
    r = Ray(Point(2, 3), Point(4, 2))
    assert Plane((1,2,0), normal_vector=(0,0,1)).intersection(r) == [
        Ray3D(Point(2, 3), Point(4, 2))]
    assert pl3.random_point() in pl3
    # issue 8570
    l2 = Line3D(Point3D(S(50000004459633)/5000000000000,
                        -S(891926590718643)/1000000000000000,
                        S(231800966893633)/100000000000000),
                Point3D(S(50000004459633)/50000000000000,
                        -S(222981647679771)/250000000000000,
                        S(231800966893633)/100000000000000))
    p2 = Plane(Point3D(S(402775636372767)/100000000000000,
                       -S(97224357654973)/100000000000000,
                       S(216793600814789)/100000000000000),
               (-S('9.00000087501922'), -S('4.81170658872543e-13'),
                S('0.0')))
    assert str([i.n(2) for i in p2.intersection(l2)]) == \
           '[Point3D(4.0, -0.89, 2.3)]'
| bsd-3-clause |
dr0pz0ne/sibble | lib/ansible/parsing/yaml/constructor.py | 27 | 4933 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.constructor import Constructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.vars.unsafe_proxy import wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class AnsibleConstructor(Constructor):
    """YAML constructor that builds Ansible's position-aware objects.

    Maps, strings, and sequences are constructed as AnsibleMapping,
    AnsibleUnicode, and AnsibleSequence, each tagged with the source
    file/line/column it came from; duplicate mapping keys produce a
    warning instead of being silently overwritten.
    """

    def __init__(self, file_name=None):
        # Remembered so _node_position_info can report the real source
        # file instead of pyyaml's default '<string>'.
        self._ansible_file_name = file_name
        super(AnsibleConstructor, self).__init__()

    def construct_yaml_map(self, node):
        # Two-step construction (yield first) so recursive/anchored
        # structures can reference the mapping before it is filled.
        data = AnsibleMapping()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
        data.ansible_pos = self._node_position_info(node)

    def construct_mapping(self, node, deep=False):
        # Most of this is from yaml.constructor.SafeConstructor. We replicate
        # it here so that we can warn users when they have duplicate dict keys
        # (pyyaml silently allows overwriting keys)
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        self.flatten_mapping(node)
        mapping = AnsibleMapping()
        # Add our extra information to the returned value
        mapping.ansible_pos = self._node_position_info(node)
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError as exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" % exc, key_node.start_mark)
            if key in mapping:
                display.warning(u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}). Using last defined value only.'.format(key, *mapping.ansible_pos))
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_yaml_str(self, node, unsafe=False):
        # Override the default string handling function
        # to always return unicode objects
        value = self.construct_scalar(node)
        ret = AnsibleUnicode(value)
        ret.ansible_pos = self._node_position_info(node)
        if unsafe:
            ret = wrap_var(ret)
        return ret

    def construct_yaml_seq(self, node):
        # Two-step construction, as for mappings above.
        data = AnsibleSequence()
        yield data
        data.extend(self.construct_sequence(node))
        data.ansible_pos = self._node_position_info(node)

    def construct_yaml_unsafe(self, node):
        # '!unsafe' tagged scalars are wrapped so they are never templated.
        return self.construct_yaml_str(node, unsafe=True)

    def _node_position_info(self, node):
        """Return (datasource, line, column) for a node, 1-indexed."""
        # the line number where the previous token has ended (plus empty lines)
        # Add one so that the first line is line 1 rather than line 0
        column = node.start_mark.column + 1
        line = node.start_mark.line + 1
        # in some cases, we may have pre-read the data and then
        # passed it to the load() call for YAML, in which case we
        # want to override the default datasource (which would be
        # '<string>') to the actual filename we read in
        datasource = self._ansible_file_name or node.start_mark.name
        return (datasource, line, column)
# Register the custom constructors so that ordinary YAML tags are built
# with the position-tracking Ansible container/string types defined above.
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:map',
    AnsibleConstructor.construct_yaml_map)
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    AnsibleConstructor.construct_yaml_map)
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:str',
    AnsibleConstructor.construct_yaml_str)
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    AnsibleConstructor.construct_yaml_str)
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:seq',
    AnsibleConstructor.construct_yaml_seq)
# Custom '!unsafe' tag: values that must not be templated.
AnsibleConstructor.add_constructor(
    u'!unsafe',
    AnsibleConstructor.construct_yaml_unsafe)
| gpl-3.0 |
janezhango/BigDataMachineLearning | py/testdir_single_jvm_fvec/test_parse_summary_manyfiles_1_fvec.py | 2 | 3463 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
    """Round-trip test: import, parse, inspect and summarise one file from
    the manyfiles-nflx-gz dataset on a (local or remote) h2o cloud.
    Python 2 code; depends on the h2o test harness modules.
    """
    def tearDown(self):
        # Fail the test if any h2o node logged errors during the test body.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # assume we're at 0xdata with it's hdfs namenode
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1, java_heap_GB=4)
        else:
            # all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
            h2o_hosts.build_cloud_with_hosts()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_parse_summary_manyfiles_1_fvec(self):
        h2o.beta_features = True
        # these will be used as directory imports/parse
        # (dataset directory name, timeout in seconds)
        csvDirlist = [
            ("manyfiles-nflx-gz", 600),
        ]
        trial = 0
        for (csvDirname, timeoutSecs) in csvDirlist:
            csvPathname = csvDirname + "/file_1.dat.gz"
            (importResult, importPattern) = h2i.import_only(bucket='home-0xdiag-datasets', path=csvPathname, schema='local', timeoutSecs=timeoutSecs)
            print "\nTrying StoreView after the import hdfs"
            h2o_cmd.runStoreView(timeoutSecs=120)
            trialStart = time.time()
            # PARSE****************************************
            hex_key = csvDirname + "_" + str(trial) + ".hex"
            start = time.time()
            parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local', hex_key=hex_key,
                timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=120)
            elapsed = time.time() - start
            print "parse end on ", parseResult['destination_key'], 'took', elapsed, 'seconds',\
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
            # INSPECT******************************************
            # We should be able to see the parse result?
            start = time.time()
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
            print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
            h2o_cmd.infoFromInspect(inspect, csvPathname)
            numRows = inspect['numRows']
            numCols = inspect['numCols']
            # Expected shape of file_1.dat.gz in the nflx dataset.
            self.assertEqual(numCols, 542)
            self.assertEqual(numRows, 100000)
            # gives us some reporting on missing values, constant values, to see if we have x specified well
            # figures out everything from parseResult['destination_key']
            # needs y to avoid output column (which can be index or name)
            # assume all the configs have the same y..just check with the firs tone
            goodX = h2o_glm.goodXFromColumnInfo(y=54, key=parseResult['destination_key'], timeoutSecs=300)
            # SUMMARY****************************************
            summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=360,
                numRows=numRows, numCols=numCols)
            # STOREVIEW***************************************
            print "\nTrying StoreView after the parse"
            h2o_cmd.runStoreView(timeoutSecs=120)
            print "Trial #", trial, "completed in", time.time() - trialStart, "seconds."
            trial += 1
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
DirkHoffmann/indico | indico/modules/auth/models/identities.py | 4 | 2796 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import datetime
from sqlalchemy.dialects.postgresql import INET, JSONB
from werkzeug.datastructures import MultiDict
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime
from indico.util.date_time import as_utc, now_utc
from indico.util.passwords import PasswordProperty
class Identity(db.Model):
    """Identities of Indico users."""
    __tablename__ = 'identities'
    # An identity is unique per (provider, identifier) pair.
    __table_args__ = (db.UniqueConstraint('provider', 'identifier'),
                      {'schema': 'users'})
    #: the unique id of the identity
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: the id of the user this identity belongs to
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        nullable=False
    )
    #: the provider name of the identity
    provider = db.Column(
        db.String,
        nullable=False
    )
    #: the unique identifier of the user within its provider
    identifier = db.Column(
        db.String,
        nullable=False
    )
    #: internal data used by the flask-multipass system
    multipass_data = db.Column(
        JSONB,
        nullable=False,
        default=lambda: None
    )
    #: the user data from the user provider
    _data = db.Column(
        'data',
        JSONB,
        nullable=False,
        default={}
    )
    #: the hash of the password in case of a local identity
    password_hash = db.Column(
        db.String
    )
    #: the password of the user in case of a local identity
    password = PasswordProperty('password_hash')
    #: the timestamp of the latest login
    last_login_dt = db.Column(
        UTCDateTime
    )
    #: the ip address that was used for the latest login
    last_login_ip = db.Column(
        INET
    )
    # relationship backrefs:
    # - user (User.identities)
    @property
    def data(self):
        """Return the provider data as a MultiDict.

        A fresh copy is built on each access, so mutating the returned
        MultiDict does not persist anything; assign to ``data`` instead.
        """
        data = MultiDict()
        data.update(self._data)
        return data
    @data.setter
    def data(self, data):
        # Store the MultiDict as a plain dict of lists so it is JSON-able.
        self._data = dict(data.lists())
    @property
    def locator(self):
        """URL locator dict identifying this identity."""
        return {'identity': self.id}
    @property
    def safe_last_login_dt(self):
        """last_login_dt that is safe for sorting (no None values)."""
        return self.last_login_dt or as_utc(datetime(1970, 1, 1))
    def register_login(self, ip):
        """Update the last login information."""
        self.last_login_dt = now_utc()
        self.last_login_ip = ip
    def __repr__(self):
        return f'<Identity({self.id}, {self.user_id}, {self.provider}, {self.identifier})>'
| gpl-3.0 |
rosswhitfield/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/FindPeakAutomaticTest.py | 3 | 26334 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import numpy as np
from mantid.simpleapi import CreateEmptyTableWorkspace, CreateWorkspace, DeleteWorkspace, FindPeaksAutomatic
from mantid.api import mtd
from unittest import mock
import plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic as _FindPeaksAutomatic
class FindPeaksAutomaticTest(unittest.TestCase):
data_ws = None
peak_guess_table = None
peak_table_header = [
'centre', 'error centre', 'height', 'error height', 'sigma', 'error sigma', 'area',
'error area'
]
alg_instance = None
x_values = None
y_values = None
def setUp(self):
# Creating two peaks on an exponential background with gaussian noise
self.x_values = np.linspace(0, 100, 1001)
self.centre = [25, 75]
self.height = [35, 20]
self.width = [10, 5]
self.y_values = self.gaussian(self.x_values, self.centre[0], self.height[0], self.width[0])
self.y_values += self.gaussian(self.x_values, self.centre[1], self.height[1], self.width[1])
self.background = 10 * np.ones(len(self.x_values))
self.y_values += self.background
# Generating a table with a guess of the position of the centre of the peaks
peak_table = CreateEmptyTableWorkspace()
peak_table.addColumn(type='float', name='Approximated Centre')
peak_table.addRow([self.centre[0] + 2])
peak_table.addRow([self.centre[1] - 3])
self.peakids = [
np.argwhere(self.x_values == self.centre[0])[0, 0],
np.argwhere(self.x_values == self.centre[1])[0, 0]
]
# Generating a workspace with the data and a flat background
self.raw_ws = CreateWorkspace(DataX=self.x_values,
DataY=self.y_values,
OutputWorkspace='raw_ws')
self.data_ws = CreateWorkspace(DataX=np.concatenate((self.x_values, self.x_values)),
DataY=np.concatenate((self.y_values, self.background)),
DataE=np.sqrt(
np.concatenate((self.y_values, self.background))),
NSpec=2,
OutputWorkspace='data_ws')
self.peak_guess_table = peak_table
self.alg_instance = _FindPeaksAutomatic.FindPeaksAutomatic()
def tearDown(self):
self.delete_if_present('data_ws')
self.delete_if_present('peak_guess_table')
self.delete_if_present('peak_table')
self.delete_if_present('refit_peak_table')
self.delete_if_present('fit_cost')
self.delete_if_present('fit_result_NormalisedCovarianceMatrix')
self.delete_if_present('fit_result_Parameters')
self.delete_if_present('fit_result_Workspace')
self.delete_if_present('fit_table')
self.delete_if_present('data_table')
self.delete_if_present('refit_data_table')
self.delete_if_present('tmp_table')
self.alg_instance = None
self.peak_guess_table = None
self.data_ws = None
@staticmethod
def gaussian(xvals, centre, height, sigma):
exponent = (xvals - centre) / (np.sqrt(2) * sigma)
return height * np.exp(-exponent * exponent)
@staticmethod
def delete_if_present(workspace):
if workspace in mtd:
DeleteWorkspace(workspace)
def assertTableEqual(self, expected, actual):
self.assertEqual(expected.columnCount(), actual.columnCount())
self.assertEqual(expected.rowCount(), actual.rowCount())
for i in range(expected.rowCount()):
self.assertEqual(expected.row(i), actual.row(i))
def assertPeakFound(self, peak_params, centre, height, sigma, tolerance=0.01):
if not np.isclose(peak_params['centre'], centre, rtol=tolerance):
raise Exception('Expected {}, got {}. Difference greater than tolerance {}'
.format(centre, peak_params['centre'], tolerance))
if not np.isclose(peak_params['height'], height, rtol=tolerance):
raise Exception('Expected {}, got {}. Difference greater than tolerance {}'
.format(height, peak_params['height'], tolerance))
if not np.isclose(peak_params['sigma'], sigma, rtol=tolerance):
raise Exception('Expected {}, got {}. Difference greater than tolerance {}'
.format(sigma, peak_params['sigma'], tolerance))
def test_algorithm_with_no_input_workspace_raises_exception(self):
with self.assertRaises(RuntimeError):
FindPeaksAutomatic()
def test_algorithm_with_negative_acceptance_threshold_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws,
AcceptanceThreshold=-0.1,
PlotPeaks=False)
def test_algorithm_with_invalid_spectrum_number(self):
#tests that a float spectrum number throws an error
with self.assertRaises(TypeError):
FindPeaksAutomatic(InputWorkspace=self.data_ws,
PlotPeaks=False,
SpectrumNumber = 3.4)
#tests that a negative integer throws an error
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws,
PlotPeaks=False,
SpectrumNumber = -1 )
def test_algorithm_with_negative_smooth_window_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, SmoothWindow=-5, PlotPeaks=False)
def test_algorithm_with_negative_num_bad_peaks_to_consider_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, BadPeaksToConsider=-3, PlotPeaks=False)
def test_algorithm_with_negative_estimate_of_peak_sigma_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, EstimatePeakSigma=-3, PlotPeaks=False)
def test_algorithm_with_negative_min_peak_sigma_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, MinPeakSigma=-0.1, PlotPeaks=False)
def test_algorithm_with_negative_max_peak_sigma_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, MaxPeakSigma=-0.1, PlotPeaks=False)
def test_algorithm_creates_all_output_workspaces(self):
ws_name = self.raw_ws.getName()
FindPeaksAutomatic(self.raw_ws)
self.assertIn('{}_with_errors'.format(ws_name), mtd)
self.assertIn('{}_{}'.format(self.raw_ws.getName(), 'properties'), mtd)
self.assertIn('{}_{}'.format(self.raw_ws.getName(), 'refit_properties'), mtd)
def test_algorithm_works_on_specified_spectrum(self):
x_values = np.array([np.linspace(0, 100, 1001), np.linspace(0, 100, 1001)], dtype=float)
centre = np.array([[25, 75], [10, 60]], dtype=float)
height = np.array([[35, 20], [40, 50]], dtype=float)
width = np.array([[10, 5], [8, 6]], dtype=float)
y_values = np.array(
[self.gaussian(x_values[0], centre[0, 0], height[0, 0], width[0, 0]),
self.gaussian(x_values[1], centre[1, 0], height[1, 0], width[1, 0])])
y_values += np.array(
[self.gaussian(x_values[0], centre[0, 1], height[0, 1], width[0, 1]),
self.gaussian(x_values[1], centre[1, 1], height[1, 1], width[1, 1])])
background = 10 * np.ones(x_values.shape)
y_values += background
raw_ws = CreateWorkspace(DataX=x_values,
DataY=y_values,
OutputWorkspace='raw_ws',NSpec = 2)
FindPeaksAutomatic(
InputWorkspace=raw_ws,
SpectrumNumber = 2,
SmoothWindow=500,
EstimatePeakSigma=6,
MinPeakSigma=3,
MaxPeakSigma=15,
)
peak_table = mtd['{}_{}'.format(raw_ws.getName(), 'properties')]
print(peak_table.row(1))
self.assertPeakFound(peak_table.row(0), 10, 40, 8)
self.assertPeakFound(peak_table.row(1), 60, 50, 6)
def test_algorithm_throws_RuntimeError_when_called_with_invalid_spectrum_number(self):
x_values = np.array([np.linspace(0, 100, 1001), np.linspace(0, 100, 1001)], dtype=float)
centre = np.array([[25, 75], [10, 60]], dtype=float)
height = np.array([[35, 20], [40, 50]], dtype=float)
width = np.array([[10, 5], [8, 6]], dtype=float)
y_values = np.array(
[self.gaussian(x_values[0], centre[0, 0], height[0, 0], width[0, 0]),
self.gaussian(x_values[1], centre[1, 0], height[1, 0], width[1, 0])])
y_values += np.array(
[self.gaussian(x_values[0], centre[0, 1], height[0, 1], width[0, 1]),
self.gaussian(x_values[1], centre[1, 1], height[1, 1], width[1, 1])])
background = 10 * np.ones(x_values.shape)
y_values += background
raw_ws = CreateWorkspace(DataX=x_values,
DataY=y_values,
OutputWorkspace='raw_ws',NSpec = 2)
with self.assertRaises(RuntimeError):
FindPeaksAutomatic(
InputWorkspace=raw_ws,
SpectrumNumber = 3,
SmoothWindow=500,
EstimatePeakSigma=6,
MinPeakSigma=3,
MaxPeakSigma=15,)
def test_algorithm_does_not_create_temporary_workspaces(self):
FindPeaksAutomatic(self.raw_ws)
self.assertNotIn('ret', mtd)
self.assertNotIn('raw_data_ws', mtd)
self.assertNotIn('flat_ws', mtd)
self.assertNotIn('fit_result_NormalisedCovarianceMatrix', mtd)
self.assertNotIn('fit_result_Parameters', mtd)
self.assertNotIn('fit_result_Workspace', mtd)
self.assertNotIn('fit_cost', mtd)
def test_output_tables_are_correctly_formatted(self):
FindPeaksAutomatic(self.raw_ws, FitToBaseline=True)
peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'properties')]
refit_peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'refit_properties')]
self.assertEqual(self.peak_table_header, peak_table.getColumnNames())
self.assertEqual(self.peak_table_header, refit_peak_table.getColumnNames())
self.assertEqual(2, peak_table.rowCount())
self.assertEqual(0, refit_peak_table.rowCount())
def test_single_erosion_returns_correct_result(self):
yvals = np.array([-2, 3, 1, 0, 4])
self.assertEqual(-2, self.alg_instance._single_erosion(yvals, 2, 2))
def test_single_erosion_checks_extremes_of_list_correctly(self):
yvals = np.array([-5, -3, 0, 1, -2, 2, 9])
self.assertEqual(-2, self.alg_instance._single_erosion(yvals, 3, 1))
self.assertEqual(-3, self.alg_instance._single_erosion(yvals, 3, 2))
def test_single_erosion_with_zero_window_does_nothing(self):
yvals = np.array([-5, -3, 0, 1, -2, 2, 9])
self.assertEqual(0, self.alg_instance._single_erosion(yvals, 2, 0))
def test_single_dilation_returns_correct_result(self):
yvals = np.array([-2, 3, 1, 0, 4])
self.assertEqual(4, self.alg_instance._single_dilation(yvals, 2, 2))
def test_single_dilation_checks_extremes_of_list_correctly(self):
yvals = np.array([-5, 3, 0, -7, 2, -2, 9])
self.assertEqual(2, self.alg_instance._single_dilation(yvals, 3, 1))
self.assertEqual(3, self.alg_instance._single_dilation(yvals, 3, 2))
def test_single_dilation_with_zero_window_does_nothing(self):
yvals = np.array([-5, -3, 0, 1, -2, 2, 9])
self.assertEqual(0, self.alg_instance._single_dilation(yvals, 2, 0))
def test_erosion_with_zero_window_is_an_invariant(self):
np.testing.assert_equal(self.y_values, self.alg_instance.erosion(self.y_values, 0))
def test_erosion_calls_single_erosion_the_correct_number_of_times(self, ):
with mock.patch(
'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic._single_erosion'
) as mock_single_erosion:
times = len(self.y_values)
win_size = 2
call_list = []
for i in range(times):
call_list.append(mock.call(self.y_values, i, win_size))
self.alg_instance.erosion(self.y_values, win_size)
self.assertEqual(times, mock_single_erosion.call_count)
mock_single_erosion.assert_has_calls(call_list, any_order=True)
def test_dilation_with_zero_window_is_an_invariant(self):
np.testing.assert_equal(self.y_values, self.alg_instance.dilation(self.y_values, 0))
def test_dilation_calls_single_erosion_the_correct_number_of_times(self):
with mock.patch(
'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic._single_dilation'
) as mock_single_dilation:
times = len(self.y_values)
win_size = 2
call_list = []
for i in range(times):
call_list.append(mock.call(self.y_values, i, win_size))
self.alg_instance.dilation(self.y_values, win_size)
self.assertEqual(times, mock_single_dilation.call_count)
mock_single_dilation.assert_has_calls(call_list, any_order=True)
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.erosion')
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.dilation'
)
def test_opening_calls_correct_functions_in_correct_order(self, mock_dilation, mock_erosion):
win_size = 3
self.alg_instance.opening(self.y_values, win_size)
self.assertEqual(mock_erosion.call_count, 1)
self.assertEqual(mock_dilation.call_count, 1)
erosion_ret = self.alg_instance.erosion(self.y_values, win_size)
mock_erosion.assert_called_with(self.y_values, win_size)
mock_dilation.assert_called_with(erosion_ret, win_size)
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.opening')
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.dilation')
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.erosion')
def test_average_calls_right_functions_in_right_order(self, mock_erosion, mock_dilation,
mock_opening):
win_size = 3
self.alg_instance.average(self.y_values, win_size)
self.assertEqual(mock_erosion.call_count, 1)
self.assertEqual(mock_dilation.call_count, 1)
self.assertEqual(mock_opening.call_count, 2)
op_ret = self.alg_instance.opening(self.y_values, win_size)
mock_opening.assert_called_with(self.y_values, win_size)
mock_dilation.assert_called_with(op_ret, win_size)
mock_erosion.assert_called_with(op_ret, win_size)
def test_generate_peak_guess_table_correctly_formats_table(self):
peakids = [2, 4, 10, 34]
peak_guess_table = self.alg_instance.generate_peak_guess_table(self.x_values, peakids)
self.assertEqual(peak_guess_table.getColumnNames(), ['centre'])
def test_generate_peak_guess_table_with_no_peaks_generates_empty_table(self):
peak_guess_table = self.alg_instance.generate_peak_guess_table(self.x_values, [])
self.assertEqual(peak_guess_table.rowCount(), 0)
def test_generate_peak_guess_table_adds_correct_values_of_peak_centre(self):
peakids = [2, 23, 19, 34, 25, 149, 234]
peak_guess_table = self.alg_instance.generate_peak_guess_table(self.x_values, peakids)
for i, pid in enumerate(sorted(peakids)):
self.assertAlmostEqual(peak_guess_table.row(i)['centre'], self.x_values[pid], 5)
def test_find_good_peaks_calls_fit_gaussian_peaks_twice_if_no_peaks_given(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks'
) as mock_fit:
tmp_table = CreateEmptyTableWorkspace()
tmp_table.addColumn(type='float', name='chi2')
tmp_table.addColumn(type='float', name='poisson')
tmp_table.addRow([10, 20])
mock_fit.return_value = (mock.MagicMock(), mock.MagicMock(), tmp_table)
self.alg_instance.min_sigma = 1
self.alg_instance.max_sigma = 10
self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, False, self.data_ws, 5)
self.assertEqual(2, mock_fit.call_count)
def _table_side_effect(self, idx):
raise ValueError('Index = %d' % idx)
def test_find_good_peaks_selects_correct_column_for_error(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks'
) as mock_fit:
mock_table = mock.Mock()
mock_table.column.side_effect = self._table_side_effect
mock_fit.return_value = None, None, mock_table
# chi2 cost
with self.assertRaises(ValueError) as chi2:
self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, False, self.data_ws, 5)
# poisson cost
with self.assertRaises(ValueError) as poisson:
self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, True, self.data_ws, 5)
self.assertIn('Index = 0', chi2.exception.args)
self.assertNotIn('Index = 1', chi2.exception.args)
self.assertNotIn('Index = 0', poisson.exception.args)
self.assertIn('Index = 1', poisson.exception.args)
def test_find_good_peaks_returns_correct_peaks(self):
self.alg_instance._min_sigma = 1
self.alg_instance._max_sigma = 10
actual_peaks, peak_table, refit_peak_table = self.alg_instance.find_good_peaks(
self.x_values, self.peakids, 0, 5, False, self.data_ws, 5)
peak1 = peak_table.row(0)
peak2 = peak_table.row(1)
self.assertEquals(self.peakids, actual_peaks)
self.assertEqual(0, refit_peak_table.rowCount())
self.assertEqual(refit_peak_table.getColumnNames(), peak_table.getColumnNames())
self.assertPeakFound(peak1, self.centre[0], self.height[0]+10, self.width[0], 0.05)
self.assertPeakFound(peak2, self.centre[1], self.height[1]+10, self.width[1], 0.05)
def test_find_peaks_is_called_if_scipy_version_higher_1_1_0(self):
mock_scipy = mock.MagicMock()
mock_scipy.__version__ = '1.1.0'
mock_scipy.signal.find_peaks.return_value = (self.peakids, {
'prominences': self.peakids
})
with mock.patch.dict('sys.modules', scipy=mock_scipy):
self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=50,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
self.assertEqual(2, mock_scipy.signal.find_peaks.call_count)
self.assertEqual(0, mock_scipy.signal.find_peaks_cwt.call_count)
def test_find_peaks_cwt_is_called_if_scipy_version_lower_1_1_0(self):
mock_scipy = mock.MagicMock()
mock_scipy.__version__ = '1.0.0'
mock_scipy.signal.find_peaks.return_value = (self.peakids, {
'prominences': self.peakids
})
with mock.patch.dict('sys.modules', scipy=mock_scipy):
self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=50,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
self.assertEqual(0, mock_scipy.signal.find_peaks.call_count)
self.assertEqual(1, mock_scipy.signal.find_peaks_cwt.call_count)
def test_process_calls_find_good_peaks(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.CreateWorkspace'
) as mock_create_ws:
mock_create_ws.return_value = self.data_ws
self.alg_instance.find_good_peaks = mock.Mock()
self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=50,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
base = self.alg_instance.average(self.y_values, 50)
base += self.alg_instance.average(self.y_values - base, 50)
flat = self.y_values - base
self.assertEqual(1, self.alg_instance.find_good_peaks.call_count)
self.alg_instance.find_good_peaks.asser_called_with(self.x_values,
flat,
acceptance=0,
bad_peak_to_consider=2,
use_poisson=False,
fit_ws=self.data_ws,
peak_width_estimate=5)
def test_process_returns_the_return_value_of_find_good_peaks(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.CreateWorkspace'
) as mock_create_ws:
mock_create_ws.return_value = self.data_ws
win_size = 500
actual_return = self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=win_size,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
import copy
actual_return = copy.deepcopy(actual_return)
base = self.alg_instance.average(self.y_values, win_size)
base += self.alg_instance.average(self.y_values - base, win_size)
expected_return = self.alg_instance.find_good_peaks(self.x_values,
self.peakids,
acceptance=0,
bad_peak_to_consider=2,
use_poisson=False,
fit_ws=self.data_ws,
peak_width_estimate=5), base
self.assertEqual(expected_return[0][0], actual_return[0][0])
self.assertTableEqual(expected_return[0][1], actual_return[0][1])
np.testing.assert_almost_equal(expected_return[1], actual_return[1])
def _assert_matplotlib_not_present(self, *args):
import sys
self.assertNotIn('matplotlib.pyplot', sys.modules)
# If matplotlib.pyplot is imported other tests fail on windows and ubuntu
def test_matplotlib_pyplot_is_not_imported(self):
self.alg_instance.dilation = mock.Mock(side_effect=self._assert_matplotlib_not_present)
self.alg_instance.opening(self.y_values, 0)
def test_that_algorithm_finds_peaks_correctly(self):
FindPeaksAutomatic(
InputWorkspace=self.raw_ws,
SmoothWindow=500,
EstimatePeakSigma=5,
MinPeakSigma=3,
MaxPeakSigma=15,
)
peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'properties')]
refit_peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'refit_properties')]
self.assertEqual(2, peak_table.rowCount())
self.assertEqual(0, refit_peak_table.rowCount())
self.assertPeakFound(peak_table.row(0), self.centre[0], self.height[0], self.width[0], 0.05)
self.assertPeakFound(peak_table.row(1), self.centre[1], self.height[1], self.width[1], 0.05)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ilayn/scipy | scipy/special/_precompute/struve_convergence.py | 12 | 3456 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
import numpy as np
import matplotlib.pyplot as plt # type: ignore[import]
import mpmath
def err_metric(a, b, atol=1e-290):
    """Relative error of `a` against reference `b`, regularised by `atol`.

    Entries where both values are the same infinity count as exact (0).
    """
    rel_err = np.abs(a - b) / (atol + np.abs(b))
    exact_inf = np.isinf(b) & (a == b)
    rel_err[exact_inf] = 0
    return rel_err
def do_plot(is_h=True):
    """Plot the convergence regions of the three struve expansions.

    Evaluates the power-series, asymptotic and Bessel-series C
    implementations on a (v, z) grid, compares them against a 50-digit
    mpmath reference, and overlays accuracy regions/boundaries (see the
    module docstring for the figure legend).

    Parameters
    ----------
    is_h : bool
        Plot Struve H when True, Struve L when False.
    """
    from scipy.special._ufuncs import (_struve_power_series,
                                       _struve_asymp_large_z,
                                       _struve_bessel_series)

    vs = np.linspace(-1000, 1000, 91)
    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])

    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)

    mpmath.mp.dps = 50
    # PEP 8 (E731): use def rather than assigning a lambda to a name.
    if is_h:
        def sh(v, z):
            return float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
    else:
        def sh(v, z):
            return float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])

    err_a = err_metric(ra[0], ex) + 1e-300
    err_p = err_metric(rp[0], ex) + 1e-300
    err_b = err_metric(rb[0], ex) + 1e-300

    err_est_a = abs(ra[1]/ra[0])
    err_est_p = abs(rp[1]/rp[0])
    err_est_b = abs(rb[1]/rb[0])

    z_cutoff = 0.7*abs(vs) + 12

    levels = [-1000, -12]

    plt.cla()
    # BUG FIX: plt.hold(1) was deprecated in Matplotlib 2.0 and removed in
    # 3.0 (hold-on is now the default behaviour), so the call is dropped.
    plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)

    plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])

    lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
    la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
    lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])

    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})

    # Heuristic boundary used by the implementation (see module docstring).
    plt.plot(vs, z_cutoff, 'k--')

    plt.xlim(vs.min(), vs.max())
    plt.ylim(zs.min(), zs.max())

    plt.xlabel('v')
    plt.ylabel('z')
def main():
    """Render both convergence diagrams side by side and save to PNG."""
    plt.clf()
    for position, is_h, title in ((121, True, 'Struve H'),
                                  (122, False, 'Struve L')):
        plt.subplot(position)
        do_plot(is_h)
        plt.title(title)
    plt.savefig('struve_convergence.png')
    plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
furukawa3/net-fj | doc/source/conf.py | 1 | 2465 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Make the package root importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'networking-fujitsu'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.6/Lib/encodings/iso2022_jp.py | 816 | 1053 | #
# iso2022_jp.py: Python Unicode Codec for ISO2022_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# Look up the C-implemented codec object for the ISO-2022-JP encoding.
codec = _codecs_iso2022.getcodec('iso2022_jp')
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode, delegated to the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte mixin reads this class attribute to drive the C codec.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    # Registration hook used by the encodings package to expose this codec
    # under the name 'iso2022_jp'.
    return codecs.CodecInfo(
        name='iso2022_jp',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
ChristopherHogan/numpy | numpy/polynomial/tests/test_polynomial.py | 123 | 15321 | """Tests for polynomial module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
def trim(coefs):
    """Drop trailing coefficients of *coefs* with magnitude below 1e-6."""
    trimmed = poly.polytrim(coefs, tol=1e-6)
    return trimmed
# Power-basis coefficients of the Chebyshev polynomials T0..T9 (e.g.
# T2 = 2x**2 - 1). Used as known-good targets in TestMisc.test_polyfromroots.
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestConstants(TestCase):
    """Check the constant arrays exported by numpy.polynomial.polynomial."""
    def test_polydomain(self):
        assert_equal(poly.polydomain, [-1, 1])
    def test_polyzero(self):
        assert_equal(poly.polyzero, [0])
    def test_polyone(self):
        assert_equal(poly.polyone, [1])
    def test_polyx(self):
        # The identity polynomial p(x) = x.
        assert_equal(poly.polyx, [0, 1])
class TestArithmetic(TestCase):
    """Exercise polyadd/polysub/polymulx/polymul/polydiv on basis monomials
    x**i, for which the expected coefficient arrays are known exactly."""
    def test_polyadd(self):
        # x**i + x**j has coefficient 1 at powers i and j (2 when i == j).
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                tgt = np.zeros(max(i, j) + 1)
                tgt[i] += 1
                tgt[j] += 1
                res = poly.polyadd([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_polysub(self):
        # x**i - x**j; cancels to zero when i == j.
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                tgt = np.zeros(max(i, j) + 1)
                tgt[i] += 1
                tgt[j] -= 1
                res = poly.polysub([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_polymulx(self):
        # Multiplication by x shifts the coefficient array up one slot.
        assert_equal(poly.polymulx([0]), [0])
        assert_equal(poly.polymulx([1]), [0, 1])
        for i in range(1, 5):
            ser = [0]*i + [1]
            tgt = [0]*(i + 1) + [1]
            assert_equal(poly.polymulx(ser), tgt)
    def test_polymul(self):
        # x**i * x**j == x**(i+j).
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                tgt = np.zeros(i + j + 1)
                tgt[i + j] += 1
                res = poly.polymul([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_polydiv(self):
        # check zero division
        assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])
        # check scalar division
        quo, rem = poly.polydiv([2], [2])
        assert_equal((quo, rem), (1, 0))
        quo, rem = poly.polydiv([2, 2], [2])
        assert_equal((quo, rem), ((1, 1), 0))
        # check rest: quotient * divisor + remainder must reproduce the input.
        for i in range(5):
            for j in range(5):
                msg = "At i=%d, j=%d" % (i, j)
                ci = [0]*i + [1, 2]
                cj = [0]*j + [1, 2]
                tgt = poly.polyadd(ci, cj)
                quo, rem = poly.polydiv(tgt, ci)
                res = poly.polyadd(poly.polymul(quo, ci), rem)
                assert_equal(res, tgt, err_msg=msg)
class TestEvaluation(TestCase):
    """Point, multi-dimensional and grid evaluation of power series."""
    # coefficients of 1 + 2*x + 3*x**2
    c1d = np.array([1., 2., 3.])
    # Separable 2-D/3-D coefficient arrays built as outer products of c1d,
    # so the expected values factor into products of 1-D evaluations.
    c2d = np.einsum('i,j->ij', c1d, c1d)
    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    y = poly.polyval(x, [1., 2., 3.])
    def test_polyval(self):
        #check empty input
        assert_equal(poly.polyval([], [1]).size, 0)
        #check normal input)
        x = np.linspace(-1, 1)
        y = [x**i for i in range(5)]
        for i in range(5):
            tgt = y[i]
            res = poly.polyval(x, [0]*i + [1])
            assert_almost_equal(res, tgt)
        tgt = x*(x**2 - 1)
        res = poly.polyval(x, [0, -1, 0, 1])
        assert_almost_equal(res, tgt)
        #check that shape is preserved
        for i in range(3):
            dims = [2]*i
            x = np.zeros(dims)
            assert_equal(poly.polyval(x, [1]).shape, dims)
            assert_equal(poly.polyval(x, [1, 0]).shape, dims)
            assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)
    def test_polyval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions: mismatched point-array lengths must raise
        assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)
        #test values: separable coefficients -> product of 1-D values
        tgt = y1*y2
        res = poly.polyval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = poly.polyval2d(z, z, self.c2d)
        assert_(res.shape == (2, 3))
    def test_polyval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)
        #test values
        tgt = y1*y2*y3
        res = poly.polyval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = poly.polyval3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3))
    def test_polygrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values: grid evaluation is the outer product of 1-D values
        tgt = np.einsum('i,j->ij', y1, y2)
        res = poly.polygrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape: grid shape is the concatenation of the input shapes
        z = np.ones((2, 3))
        res = poly.polygrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3)*2)
    def test_polygrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = poly.polygrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = poly.polygrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
    """Antiderivatives via polyint: constants, bounds, scaling and axis."""
    def test_polyint(self):
        # check exceptions: non-integer/negative order, too many constants
        assert_raises(ValueError, poly.polyint, [0], .5)
        assert_raises(ValueError, poly.polyint, [0], -1)
        assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = poly.polyint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])
        # check single integration with integration constant
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            res = poly.polyint(pol, m=1, k=[i])
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
            # the antiderivative must equal the constant k at the lower bound
            assert_almost_equal(poly.polyval(-1, res), i)
        # check single integration with integration constant and scaling
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            res = poly.polyint(pol, m=1, k=[i], scl=2)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        # (m-fold polyint must agree with m chained single integrations)
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1)
                res = poly.polyint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1, k=[k])
                res = poly.polyint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
                res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
                res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_polyint_axis(self):
        # check that axis keyword works: integrating along an axis must match
        # integrating each 1-D slice separately.
        c2d = np.random.random((3, 4))
        tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T
        res = poly.polyint(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([poly.polyint(c) for c in c2d])
        res = poly.polyint(c2d, axis=1)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])
        res = poly.polyint(c2d, k=3, axis=1)
        assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
    """Derivatives via polyder, checked as the inverse of polyint."""
    def test_polyder(self):
        # check exceptions: non-integer/negative derivative order
        assert_raises(ValueError, poly.polyder, [0], .5)
        assert_raises(ValueError, poly.polyder, [0], -1)
        # check that zeroth deriviative does nothing
        for i in range(5):
            tgt = [0]*i + [1]
            res = poly.polyder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))
        # check that derivation is the inverse of integration
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = poly.polyder(poly.polyint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check derivation with scaling (scl=2 then scl=.5 must cancel)
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))
    def test_polyder_axis(self):
        # check that axis keyword works, against per-slice differentiation
        c2d = np.random.random((3, 4))
        tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T
        res = poly.polyder(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([poly.polyder(c) for c in c2d])
        res = poly.polyder(c2d, axis=1)
        assert_almost_equal(res, tgt)
class TestVander(TestCase):
    """Vandermonde matrices: column i must equal polyval at basis x**i, and
    dotting with flattened coefficients must reproduce polyval2d/3d."""
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    def test_polyvander(self):
        # check for 1d x
        x = np.arange(3)
        v = poly.polyvander(x, 3)
        assert_(v.shape == (3, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], poly.polyval(x, coef))
        # check for 2d x
        x = np.array([[1, 2], [3, 4], [5, 6]])
        v = poly.polyvander(x, 3)
        assert_(v.shape == (3, 2, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], poly.polyval(x, coef))
    def test_polyvander2d(self):
        # also tests polyval2d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3))
        van = poly.polyvander2d(x1, x2, [1, 2])
        tgt = poly.polyval2d(x1, x2, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = poly.polyvander2d([x1], [x2], [1, 2])
        assert_(van.shape == (1, 5, 6))
    def test_polyvander3d(self):
        # also tests polyval3d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3, 4))
        van = poly.polyvander3d(x1, x2, x3, [1, 2, 3])
        tgt = poly.polyval3d(x1, x2, x3, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3])
        assert_(van.shape == (1, 5, 24))
class TestCompanion(TestCase):
    """Companion matrices: input validation, shape, and the linear case."""
    def test_raises(self):
        # degree < 1 has no companion matrix
        assert_raises(ValueError, poly.polycompanion, [])
        assert_raises(ValueError, poly.polycompanion, [1])
    def test_dimensions(self):
        # degree-i polynomial -> i x i companion matrix
        for i in range(1, 5):
            coef = [0]*i + [1]
            assert_(poly.polycompanion(coef).shape == (i, i))
    def test_linear_root(self):
        # companion of 1 + 2x is the 1x1 matrix holding its root -1/2
        assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
class TestMisc(TestCase):
    """polyfromroots/polyroots round trips, least-squares fitting, trimming."""
    def test_polyfromroots(self):
        res = poly.polyfromroots([])
        assert_almost_equal(trim(res), [1])
        for i in range(1, 5):
            # Chebyshev extrema as roots: polyfromroots scaled by 2**(i-1)
            # must reproduce the Chebyshev coefficients in Tlist.
            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            tgt = Tlist[i]
            res = poly.polyfromroots(roots)*2**(i-1)
            assert_almost_equal(trim(res), trim(tgt))
    def test_polyroots(self):
        assert_almost_equal(poly.polyroots([1]), [])
        assert_almost_equal(poly.polyroots([1, 2]), [-.5])
        for i in range(2, 5):
            # round trip: roots -> coefficients -> roots
            tgt = np.linspace(-1, 1, i)
            res = poly.polyroots(poly.polyfromroots(tgt))
            assert_almost_equal(trim(res), trim(tgt))
    def test_polyfit(self):
        def f(x):
            return x*(x - 1)*(x - 2)
        # Test exceptions
        assert_raises(ValueError, poly.polyfit, [1], [1], -1)
        assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
        assert_raises(TypeError, poly.polyfit, [], [1], 0)
        assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
        assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
        assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
        # Test fit: a cubic fit of a cubic must interpolate exactly
        x = np.linspace(0, 2)
        y = f(x)
        #
        coef3 = poly.polyfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(poly.polyval(x, coef3), y)
        #
        coef4 = poly.polyfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(poly.polyval(x, coef4), y)
        # multiple right-hand sides: columns fitted independently
        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting: zero-weighted points must not affect the fit
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        yw[0::2] = 0
        wcoef3 = poly.polyfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex values x points whose square
        # is zero when summed.
        x = [1, 1j, -1, -1j]
        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
    def test_polytrim(self):
        coef = [2, -1, 1, 0]
        # Test exceptions
        assert_raises(ValueError, poly.polytrim, coef, -1)
        # Test results
        assert_equal(poly.polytrim(coef), coef[:-1])
        assert_equal(poly.polytrim(coef, 1), coef[:-3])
        assert_equal(poly.polytrim(coef, 2), [0])
    def test_polyline(self):
        # polyline(off, scl) -> coefficients of off + scl*x
        assert_equal(poly.polyline(3, 4), [3, 4])
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
psachin/swift | test/probe/test_object_partpower_increase.py | 6 | 7478 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from errno import EEXIST
from shutil import copyfile
from tempfile import mkstemp
from time import time
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.cli.relinker import relink, cleanup
from swift.common.manager import Manager
from swift.common.ring import RingBuilder
from swift.common.utils import replace_partition_in_path
from swift.obj.diskfile import get_data_dir
from test.probe.common import ECProbeTest, ProbeTest, ReplProbeTest
class TestPartPowerIncrease(ProbeTest):
    """Probe test for the object-ring partition power increase workflow.

    Scenario: prepare the increase, relink existing objects, then either
    complete or cancel the increase, and finally clean up — verifying that
    objects remain accessible at every step and that files in the stale
    partition locations are removed.  Mutates the live rings under
    /etc/swift, so setUp/tearDown back up and restore them.
    """
    def setUp(self):
        super(TestPartPowerIncrease, self).setUp()
        # Temp copies of the ring and builder so tearDown can restore them.
        _, self.ring_file_backup = mkstemp()
        _, self.builder_file_backup = mkstemp()
        self.ring_file = self.object_ring.serialized_path
        self.builder_file = self.ring_file.replace('ring.gz', 'builder')
        copyfile(self.ring_file, self.ring_file_backup)
        copyfile(self.builder_file, self.builder_file_backup)
        # In case the test user is not allowed to write rings
        self.assertTrue(os.access('/etc/swift', os.W_OK))
        self.assertTrue(os.access('/etc/swift/backups', os.W_OK))
        self.assertTrue(os.access('/etc/swift/object.builder', os.W_OK))
        self.assertTrue(os.access('/etc/swift/object.ring.gz', os.W_OK))
        # Ensure the test object will be erasure coded
        self.data = ' ' * getattr(self.policy, 'ec_segment_size', 1)
        # One device root per unique (ip, port) pair in the object ring.
        self.devices = [
            self.device_dir('object', {'ip': ip, 'port': port, 'device': ''})
            for ip, port in set((dev['ip'], dev['port'])
                                for dev in self.object_ring.devs)]
    def tearDown(self):
        # Keep a backup copy of the modified .builder file
        backup_dir = os.path.join(
            os.path.dirname(self.builder_file), 'backups')
        try:
            os.mkdir(backup_dir)
        except OSError as err:
            # The backups directory may already exist; anything else is real.
            if err.errno != EEXIST:
                raise
        backup_name = (os.path.join(
            backup_dir,
            '%d.probe.' % time() + os.path.basename(self.builder_file)))
        copyfile(self.builder_file, backup_name)
        # Restore original ring
        # NOTE(review): sudo via os.system is deliberate — the test user may
        # not own /etc/swift; both path operands come from mkstemp/swift.
        os.system('sudo mv %s %s' % (
            self.ring_file_backup, self.ring_file))
        os.system('sudo mv %s %s' % (
            self.builder_file_backup, self.builder_file))
    def _find_objs_ondisk(self, container, obj):
        # Return the paths of all .data files currently stored for obj.
        locations = []
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        for node in onodes:
            start_dir = os.path.join(
                self.device_dir('object', node),
                get_data_dir(self.policy),
                str(opart))
            for root, dirs, files in os.walk(start_dir):
                for filename in files:
                    if filename.endswith('.data'):
                        locations.append(os.path.join(root, filename))
        return locations
    def _test_main(self, cancel=False):
        # Full workflow; with cancel=True the increase is rolled back after
        # relinking instead of being committed.
        container = 'container-%s' % uuid4()
        obj = 'object-%s' % uuid4()
        obj2 = 'object-%s' % uuid4()
        # Create container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container, headers=headers)
        # Create a new object
        client.put_object(self.url, self.token, container, obj, self.data)
        client.head_object(self.url, self.token, container, obj)
        # Prepare partition power increase
        builder = RingBuilder.load(self.builder_file)
        builder.prepare_increase_partition_power()
        builder.save(self.builder_file)
        ring_data = builder.get_ring()
        ring_data.save(self.ring_file)
        # Ensure the proxy uses the changed ring
        Manager(['proxy']).restart()
        # Ensure object is still accessible
        client.head_object(self.url, self.token, container, obj)
        # Relink existing objects
        for device in self.devices:
            self.assertEqual(0, relink(skip_mount_check=True, devices=device))
        # Create second object after relinking and ensure it is accessible
        client.put_object(self.url, self.token, container, obj2, self.data)
        client.head_object(self.url, self.token, container, obj2)
        # Remember the original object locations
        org_locations = self._find_objs_ondisk(container, obj)
        org_locations += self._find_objs_ondisk(container, obj2)
        # Remember the new object locations
        new_locations = []
        for loc in org_locations:
            new_locations.append(replace_partition_in_path(
                str(loc), self.object_ring.part_power + 1))
        # Overwrite existing object - to ensure that older timestamp files
        # will be cleaned up properly later
        client.put_object(self.url, self.token, container, obj, self.data)
        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)
        # Increase partition power
        builder = RingBuilder.load(self.builder_file)
        if not cancel:
            builder.increase_partition_power()
        else:
            builder.cancel_increase_partition_power()
        builder.save(self.builder_file)
        ring_data = builder.get_ring()
        ring_data.save(self.ring_file)
        # Ensure the proxy uses the changed ring
        Manager(['proxy']).restart()
        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)
        # Overwrite existing object - to ensure that older timestamp files
        # will be cleaned up properly later
        client.put_object(self.url, self.token, container, obj, self.data)
        # Cleanup old objects in the wrong location
        for device in self.devices:
            self.assertEqual(0, cleanup(skip_mount_check=True, devices=device))
        # Ensure objects are still accessible
        client.head_object(self.url, self.token, container, obj)
        client.head_object(self.url, self.token, container, obj2)
        # Ensure data in old or relinked object locations is removed
        if not cancel:
            for fn in org_locations:
                self.assertFalse(os.path.exists(fn))
        else:
            for fn in new_locations:
                self.assertFalse(os.path.exists(fn))
class TestReplPartPowerIncrease(TestPartPowerIncrease, ReplProbeTest):
    """Run the part-power scenarios against a replication storage policy."""
    def test_main(self):
        self._test_main()
    def test_canceled(self):
        self._test_main(cancel=True)
class TestECPartPowerIncrease(TestPartPowerIncrease, ECProbeTest):
    """Run the part-power scenarios against an erasure-coding storage policy."""
    def test_main(self):
        self._test_main()
    def test_canceled(self):
        self._test_main(cancel=True)
if __name__ == '__main__':
    main()
| apache-2.0 |
stefansommer/jetflows | code/kernels/pyGaussian.py | 2 | 6717 | #
# This file is part of jetflows.
#
# Copyright (C) 2014, Henry O. Jacobs (hoj201@gmail.com), Stefan Sommer (sommer@di.ku.dk)
# https://github.com/nefan/jetflows.git
#
# jetflows is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jetflows is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jetflows. If not, see <http://www.gnu.org/licenses/>.
#
"""
Wrapper for gaussian.py
"""
import numpy as np
import gaussian
# Module-level configuration; presumably set by client code before the kernel
# functions are called (they are None placeholders here) — TODO confirm caller.
N = None       # number of points
DIM = None     # spatial dimension, drives the loops in derivatives_of_Gaussians
SIGMA = None   # Gaussian kernel width used by every function below
def Gaussian_monomial(x, n):
    """Return (x/SIGMA)**n * exp(-0.5*(x/SIGMA)**2).

    The exponential weight is split evenly over the n monomial factors,
    which keeps each factor well-scaled before exponentiation.
    """
    scaled = x / SIGMA
    factor = scaled * np.exp(-(0.5 / n) * scaled**2)
    return factor**n
def diff_1D_Gaussian_cpp(x,k,SIGMA,parallel=False):
    # Thin wrapper over the compiled `gaussian` extension: fills `out`
    # in place with the k-th derivative of the 1D Gaussian evaluated at
    # the flattened points of x, then restores x's original shape.
    # `parallel` is forwarded to the extension to enable threading.
    s = x.shape
    out = np.zeros(x.size)
    gaussian.diff_1D_Gaussian_parallel_cpp(x.flatten(),out,k,SIGMA,parallel)
    return out.reshape(s)
def diff_1D_Gaussian( x , k ):
    """Return the k-th derivative of the 1D Gaussian exp(-0.5*(x/SIGMA)**2).

    Closed forms are expressed through Gaussian_monomial(x, n), which is
    (x/SIGMA)**n times the Gaussian itself.

    Parameters
    ----------
    x : array_like
        Evaluation points.
    k : int
        Derivative order; only 0 <= k <= 6 is implemented.

    Raises
    ------
    ValueError
        If k is outside the implemented range.  (Previously this case
        printed a message and returned the string 'error', which callers
        would silently propagate into arithmetic.)
    """
    G = np.exp( -0.5 * (x / SIGMA)**2 )
    if k == 0:
        return G
    elif k == 1:
        return -1.*Gaussian_monomial(x,1) / (SIGMA)
    elif k == 2:
        return ( Gaussian_monomial(x,2) - G ) / (SIGMA**2)
    elif k == 3:
        return -1.*( Gaussian_monomial(x,3) - 3.*Gaussian_monomial(x,1)) / (SIGMA**3)
    elif k == 4:
        return (Gaussian_monomial(x,4) - 6.*Gaussian_monomial(x,2) + 3.*G ) / (SIGMA**4)
    elif k == 5:
        return (-1.*(Gaussian_monomial(x,5) - 10.*Gaussian_monomial(x,3) + 15.*Gaussian_monomial(x,1) ))/(SIGMA**5)
    elif k == 6:
        return (Gaussian_monomial(x,6) - 15.*Gaussian_monomial(x,4) + 45.*Gaussian_monomial(x,2) -15.*G)/(SIGMA**6)
    raise ValueError('diff_1D_Gaussian: unsupported derivative order k=%d' % k)
def derivatives_of_Gaussians( p1 , p2, parallel=False ):
    """Return the Gaussian kernel matrix between point sets p1 and p2
    together with its mixed partial derivative tensors up to order six.

    Parameters
    ----------
    p1, p2 : ndarray, shape (N_p1, DIM) / (N_p2, DIM)
        Point clouds; DIM is the module-level spatial dimension.
    parallel : bool
        Forwarded to the compiled 1D-derivative kernel.

    Returns
    -------
    G, DG, D2G, D3G, D4G, D5G, D6G : ndarrays of shape
        (N_p1, N_p2), (N_p1, N_p2, DIM), ..., (N_p1, N_p2, DIM*6).

    The previous implementation hand-unrolled six nearly identical nested
    loops (one per derivative order); they are collapsed into a single
    helper that enumerates multi-indices with itertools.product.
    """
    import itertools

    N_p1 = p1.shape[0]
    N_p2 = p2.shape[0]

    # Pairwise coordinate differences dx[i,j,a] = p1[i,a] - p2[j,a]
    # and squared distances r_sq[i,j].
    r_sq = np.zeros( [ N_p1 , N_p2 ] )
    dx = np.zeros( [N_p1,N_p2,DIM] )
    for a in range(0,DIM):
        dx[:,:,a] = np.outer( p1[:,a] , np.ones(N_p2) ) - np.outer( np.ones(N_p1), p2[:,a] )
        r_sq[:,:] = dx[:,:,a]**2 + r_sq[:,:]
    G = np.exp( - r_sq / (2.*SIGMA**2) )

    def _tensor_derivative(order):
        # D[i,j,a1,...,ak]: mixed partial of G along axes (a1,...,ak).
        # Because the Gaussian factorizes over dimensions, the derivative is
        # a product over axes b of the (alpha_b)-th 1D derivative, where
        # alpha_b counts how often axis b occurs in the multi-index.
        D = np.ones( [N_p1, N_p2] + [DIM]*order )
        for idx in itertools.product(range(DIM), repeat=order):
            alpha = np.zeros(DIM, dtype=int)
            for axis in idx:
                alpha[axis] += 1
            val = np.ones( [N_p1, N_p2] )
            for b in range(0,DIM):
                val = val * diff_1D_Gaussian_cpp( dx[:,:,b], alpha[b], SIGMA, parallel )
            D[(slice(None), slice(None)) + idx] = val
        return D

    DG = _tensor_derivative(1)
    D2G = _tensor_derivative(2)
    D3G = _tensor_derivative(3)
    D4G = _tensor_derivative(4)
    D5G = _tensor_derivative(5)
    D6G = _tensor_derivative(6)
    return G, DG, D2G, D3G, D4G, D5G , D6G
| agpl-3.0 |
frappe/erpnext | erpnext/payroll/doctype/additional_salary/test_additional_salary.py | 3 | 2091 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe, erpnext
from frappe.utils import nowdate, add_days
from erpnext.hr.doctype.employee.test_employee import make_employee
from erpnext.payroll.doctype.salary_component.test_salary_component import create_salary_component
from erpnext.payroll.doctype.salary_slip.test_salary_slip import make_employee_salary_slip, setup_test
from erpnext.payroll.doctype.salary_structure.test_salary_structure import make_salary_structure
class TestAdditionalSalary(unittest.TestCase):
	"""Integration test: a recurring Additional Salary record must surface
	as a matching earning row on the employee's generated salary slip."""
	def setUp(self):
		setup_test()
	def tearDown(self):
		# Wipe every payroll document created during the test run.
		for dt in ["Salary Slip", "Additional Salary", "Salary Structure Assignment", "Salary Structure"]:
			frappe.db.sql("delete from `tab%s`" % dt)
	def test_recurring_additional_salary(self):
		amount = 0
		salary_component = None
		emp_id = make_employee("test_additional@salary.com")
		# Push relieving_date far into the future so the recurrence stays active.
		frappe.db.set_value("Employee", emp_id, "relieving_date", add_days(nowdate(), 1800))
		salary_structure = make_salary_structure("Test Salary Structure Additional Salary", "Monthly", employee=emp_id)
		add_sal = get_additional_salary(emp_id)
		ss = make_employee_salary_slip("test_additional@salary.com", "Monthly", salary_structure=salary_structure.name)
		# Locate the earning row created from the recurring additional salary.
		for earning in ss.earnings:
			if earning.salary_component == "Recurring Salary Component":
				amount = earning.amount
				salary_component = earning.salary_component
		self.assertEqual(amount, add_sal.amount)
		self.assertEqual(salary_component, add_sal.salary_component)
def get_additional_salary(emp_id):
	"""Create, save and submit a recurring Additional Salary of 5000 for
	*emp_id*, effective from 50 days ago until 180 days from now."""
	create_salary_component("Recurring Salary Component")
	additional_salary = frappe.new_doc("Additional Salary")
	additional_salary.employee = emp_id
	additional_salary.salary_component = "Recurring Salary Component"
	additional_salary.is_recurring = 1
	additional_salary.amount = 5000
	additional_salary.currency = erpnext.get_default_currency()
	additional_salary.from_date = add_days(nowdate(), -50)
	additional_salary.to_date = add_days(nowdate(), 180)
	additional_salary.save()
	additional_salary.submit()
	return additional_salary
| gpl-3.0 |
GustJc/PyPhysics | experiments/01-sprited-simple-physics/main.py | 1 | 2946 | import sys
sys.path.append("../../modules")
from mymath import *
from physics.body2d import body2d
from gameobject import *
import pygame.gfxdraw
import pygame
# Runtime toggles flipped from the keyboard handler (H and T keys in App.on_event).
show_collisions = False
show_text = True
# Sprite group holding every ball; updated and drawn each frame.
ball_group = pygame.sprite.Group()
# HUD instruction surface; rendered in App.on_init once pygame fonts exist.
tx_instruction = None
class App():
    """Minimal pygame application: spawns a few physics-enabled ball
    sprites and runs the event/update/render loop until the window closes."""
    def __init__(self):
        self._running = True
        self._display_surf = None
        # NOTE(review): 'weight' looks like it was meant to be 'height';
        # kept as-is since it is a public instance attribute.
        self.size = self.width, self.weight = 640, 480
    def on_init(self):
        """Initialise pygame, build the ball sprites and the HUD text.

        Returns True on success so on_execute() can abort cleanly; the
        previous version returned None, which made the failure check in
        on_execute() (`== False`) unreachable.
        """
        pygame.init()
        self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
        self._running = True
        image = pygame.image.load("ball.png")
        image = image.convert_alpha()
        # Plain ball.
        ball_sprite = gameobject(50, 92, image, ball_group)
        ball_sprite.rect = ball_sprite.image.get_rect()
        ball_sprite.set_hitbox(10, 10, -20, -20)
        ball_sprite.body.mass = 1.0
        # Red-tinted copy of the same texture.
        red_ball = gameobject(200, 92, image.copy())
        red_ball.image.fill((80, 0, 0, 127), (red_ball.rect), pygame.BLEND_RGB_ADD)
        red_ball.set_hitbox(10, 10, -20, -20)
        red_ball.body.mass = 1.1
        ball_group.add(red_ball)
        # Larger, heavier ball scaled up from the base texture.
        bigball_sprite = gameobject(250, -20, image, ball_group)
        bigball_sprite.image = pygame.transform.scale(image, (200, 200))
        bigball_sprite.rect = bigball_sprite.image.get_rect()
        bigball_sprite.set_hitbox(25, 25, -50, -50)
        bigball_sprite.body.mass = 1.5
        # HUD text
        myfont = pygame.font.SysFont("monospace", 16)
        global tx_instruction
        tx_instruction = myfont.render("Hold space to add force. H to show hitbox. T to hide text.", 0, (255, 0, 0))
        return True
    def on_event(self, event):
        """Handle one pygame event: window close and the H/T toggles."""
        if event.type == pygame.QUIT:
            self._running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_h:
                global show_collisions
                show_collisions = not show_collisions
            if event.key == pygame.K_t:
                global show_text
                show_text = not show_text
    def on_update(self, seconds):
        """Advance the simulation by *seconds* (float, in seconds)."""
        ball_group.update(seconds)
    def on_render(self):
        """Draw the balls, optional hitbox overlays and the HUD text."""
        ball_group.draw(self._display_surf)
        if show_collisions:
            for ball in ball_group:
                pygame.gfxdraw.box(self._display_surf, ball.get_hitbox(), (100, 255, 0, 127))
        if show_text:
            self._display_surf.blit(tx_instruction, (5, 5))
    def on_cleanup(self):
        pygame.quit()
    def on_execute(self):
        """Main loop: init, then poll events / update / render until quit."""
        if not self.on_init():
            self._running = False
        last_time = pygame.time.get_ticks()
        while self._running:
            self._display_surf.fill((0, 0, 0))
            for event in pygame.event.get():
                self.on_event(event)
            # Frame time in seconds since the previous iteration.
            dt = (pygame.time.get_ticks() - last_time) * 0.001
            last_time = pygame.time.get_ticks()
            self.on_update(dt)
            self.on_render()
            pygame.display.flip()
        self.on_cleanup()
# Script entry point: construct the app and run the main loop.
if __name__ == "__main__":
    theApp = App()
    theApp.on_execute()
| gpl-3.0 |
Ravenm/2143-OOP-NASH | python3env/Lib/site-packages/pip/_vendor/requests/sessions.py | 165 | 24544 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Bound on the number of entries retained in a Session's permanent-redirect
# cache (backed by the imported RecentlyUsedContainer).
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Resolve a setting from its request-level and session-level values.

    A missing side yields the other side unchanged.  Non-mapping values on
    the request win outright.  When both sides are mappings they are merged
    with request entries overriding session entries, and any key whose
    merged value is ``None`` is removed (the request's way of deleting a
    session default).
    """
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Scalars (e.g. ``verify=False``) bypass merging entirely.
    if not (isinstance(session_setting, Mapping) and
            isinstance(request_setting, Mapping)):
        return request_setting

    merged = dict_class(to_key_val_list(session_setting))
    merged.update(to_key_val_list(request_setting))

    # Collect keys first so we never mutate while iterating.
    for key in [k for k, v in merged.items() if v is None]:
        del merged[key]

    return merged
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Merge request-level and session-level hooks.

    A hooks dict whose 'response' list is empty counts as absent; without
    this special case ``{'response': []}`` on the request would wipe out
    the session's hooks during the generic merge.
    """
    def _blank(hooks):
        return hooks is None or hooks.get('response') == []

    if _blank(session_hooks):
        return request_hooks
    if _blank(request_hooks):
        return session_hooks
    return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
    # Redirect-following behaviour shared via mixin; Session inherits it and
    # supplies self.max_redirects, self.trust_env, self.cookies,
    # self.redirect_cache and self.send.

    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None, **adapter_kwargs):
        """Receives a Response. Returns a generator of Responses.

        Lazily follows the redirect chain starting at ``resp`` (the response
        to ``req``), yielding each follow-up Response as it is fetched.
        Raises :class:`TooManyRedirects` once ``self.max_redirects`` is hit.
        """
        i = 0
        hist = []  # keep track of history

        while resp.is_redirect:
            prepared_request = req.copy()

            if i > 0:
                # Update history and keep track of redirects.
                hist.append(resp)
                new_hist = list(hist)
                resp.history = new_hist

            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                # Body could not be decoded; drain the raw stream instead so
                # the connection can still be reused.
                resp.raw.read(decode_content=False)

            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)

            # Release the connection back into the pool.
            resp.close()

            url = resp.headers['location']
            method = req.method

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)

            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)
            # Cache the url, unless it redirects to itself.
            if resp.is_permanent_redirect and req.url != prepared_request.url:
                self.redirect_cache[req.url] = prepared_request.url

            # http://tools.ietf.org/html/rfc7231#section-6.4.4
            if (resp.status_code == codes.see_other and
                    method != 'HEAD'):
                method = 'GET'

            # Do what the browsers do, despite standards...
            # First, turn 302s into GETs.
            if resp.status_code == codes.found and method != 'HEAD':
                method = 'GET'

            # Second, if a POST is responded to with a 301, turn it into a GET.
            # This bizarre behaviour is explained in Issue 1704.
            if resp.status_code == codes.moved and method == 'POST':
                method = 'GET'

            prepared_request.method = method

            # https://github.com/kennethreitz/requests/issues/1084
            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                # The method may have changed (e.g. POST -> GET), so the body
                # and its Content-Length no longer apply.
                if 'Content-Length' in prepared_request.headers:
                    del prepared_request.headers['Content-Length']
                prepared_request.body = None

            headers = prepared_request.headers
            try:
                # Drop the stale Cookie header; cookies are re-prepared below.
                del headers['Cookie']
            except KeyError:
                pass

            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            prepared_request._cookies.update(self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # Override the original request.
            req = prepared_request

            resp = self.send(
                req,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies,
                allow_redirects=False,
                **adapter_kwargs
            )

            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

            i += 1
            yield resp

    def rebuild_auth(self, prepared_request, response):
        """
        When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url

        if 'Authorization' in headers:
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            # NOTE(review): only the hostname is compared here, so a redirect
            # to a different port/scheme on the same host keeps the header.
            original_parsed = urlparse(response.request.url)
            redirect_parsed = urlparse(url)

            if (original_parsed.hostname != redirect_parsed.hostname):
                del headers['Authorization']

        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)

        return

    def rebuild_proxies(self, prepared_request, proxies):
        """
        This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :returns: the (possibly updated) proxies dict to use for the request.
        """
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        # Never mutate the caller's dict; work on a copy.
        new_proxies = proxies.copy() if proxies is not None else {}

        if self.trust_env and not should_bypass_proxies(url):
            environ_proxies = get_environ_proxies(url)

            proxy = environ_proxies.get(scheme)

            if proxy:
                # setdefault: explicit proxies win over environment ones.
                new_proxies.setdefault(scheme, environ_proxies[scheme])

        if 'Proxy-Authorization' in headers:
            # Stale header from the previous hop; re-derived below if possible.
            del headers['Proxy-Authorization']

        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)

        return new_proxies
class Session(SessionRedirectMixin):
    """A Requests session.

    Provides cookie persistence, connection-pooling, and configuration.

    Basic Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> s.get('http://httpbin.org/get')
      <Response [200]>

    Or as a context manager::

      >>> with requests.Session() as s:
      >>>     s.get('http://httpbin.org/get')
      <Response [200]>
    """

    # Attributes captured by __getstate__ for pickling.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
        'max_redirects',
    ]

    def __init__(self):

        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content default.
        self.stream = False

        #: SSL Verification default.
        self.verify = True

        #: SSL certificate default.
        self.cert = None

        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True

        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters.
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())

        # Only store 1000 redirects to prevent using infinite memory
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.

        :param request: :class:`Request` instance to prepare with this
            session's settings.
        """
        cookies = request.cookies or {}

        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)

        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies)

        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)

        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p

    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) whether the SSL cert will be verified.
            A CA_BUNDLE path can also be provided. Defaults to ``True``.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        """
        # Create the Request.
        req = Request(
            method = method.upper(),
            url = url,
            headers = headers,
            files = files,
            data = data or {},
            json = json,
            params = params or {},
            auth = auth,
            cookies = cookies,
            hooks = hooks,
        )
        prep = self.prepare_request(req)

        proxies = proxies or {}

        # Fold in environment-derived proxies/verify settings.
        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )

        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)

        return resp

    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)

    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)

    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        # HEAD defaults to NOT following redirects, unlike the other verbs.
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)

    def post(self, url, data=None, json=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('POST', url, data=data, json=json, **kwargs)

    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PUT', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PATCH', url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('DELETE', url, **kwargs)

    def send(self, request, **kwargs):
        """Send a given PreparedRequest.

        Resolves cached permanent redirects, dispatches through the matching
        adapter, runs response hooks, persists cookies and (optionally)
        follows redirects. Returns the final :class:`Response`.
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if not isinstance(request, PreparedRequest):
            raise ValueError('You can only send PreparedRequests.')

        # Follow any cached permanent redirects up front; `checked_urls`
        # breaks out of redirect cycles in the cache.
        checked_urls = set()
        while request.url in self.redirect_cache:
            checked_urls.add(request.url)
            new_url = self.redirect_cache.get(request.url)
            if new_url in checked_urls:
                break
            request.url = new_url

        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = datetime.utcnow()

        # Send the request
        r = adapter.send(request, **kwargs)

        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start

        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)

        # Persist cookies
        if r.history:

            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)

        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request, **kwargs)

        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []

        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history

        # Unless streaming was requested, eagerly consume the body now.
        if not stream:
            r.content

        return r

    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        """Check the environment and merge it with some settings.

        :returns: dict of keyword arguments (verify/proxies/stream/cert)
            suitable for passing to :meth:`send`.
        """
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            env_proxies = get_environ_proxies(url) or {}
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)

            # Look for requests environment configuration and be compatible
            # with cURL.
            if verify is True or verify is None:
                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
                          os.environ.get('CURL_CA_BUNDLE'))

        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)

        return {'verify': verify, 'proxies': proxies, 'stream': stream,
                'cert': cert}

    def get_adapter(self, url):
        """Returns the appropriate connection adapter for the given URL."""
        # First prefix match wins; mount() keeps longer prefixes earlier.
        for (prefix, adapter) in self.adapters.items():

            if url.lower().startswith(prefix):
                return adapter

        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for '%s'" % url)

    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()

    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.

        Adapters are sorted in descending order by key length."""
        self.adapters[prefix] = adapter
        # Re-insert every shorter prefix at the end of the OrderedDict so
        # that get_adapter() always tries the longest prefixes first.
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]

        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)

    def __getstate__(self):
        state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
        # Flatten to a plain dict for pickling (the LRU container itself is
        # presumably not picklable -- rebuilt in __setstate__).
        state['redirect_cache'] = dict(self.redirect_cache)
        return state

    def __setstate__(self, state):
        redirect_cache = state.pop('redirect_cache', {})
        for attr, value in state.items():
            setattr(self, attr, value)
        # Re-create the bounded LRU container and replay the cached entries.
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
        for redirect, to in redirect_cache.items():
            self.redirect_cache[redirect] = to
def session():
    """Return a new :class:`Session`, suitable for use as a context manager."""
    return Session()
| cc0-1.0 |
synergeticsedx/deployment-wipro | common/lib/calc/calc/calc.py | 176 | 13572 | """
Parser and evaluator for FormulaResponse and NumericalResponse
Uses pyparsing to parse. Main function as of now is evaluator().
"""
import math
import operator
import numbers
import numpy
import scipy.constants
import functions
from pyparsing import (
Word, Literal, CaselessLiteral, ZeroOrMore, MatchFirst, Optional, Forward,
Group, ParseResults, stringEnd, Suppress, Combine, alphas, nums, alphanums
)
# Unary functions students may use in formulas.  Trig/hyperbolic helpers not
# provided by numpy (sec, csc, cot, ...) come from the local `functions` module.
DEFAULT_FUNCTIONS = {
    'sin': numpy.sin,
    'cos': numpy.cos,
    'tan': numpy.tan,
    'sec': functions.sec,
    'csc': functions.csc,
    'cot': functions.cot,
    'sqrt': numpy.sqrt,
    'log10': numpy.log10,
    'log2': numpy.log2,
    'ln': numpy.log,
    'exp': numpy.exp,
    'arccos': numpy.arccos,
    'arcsin': numpy.arcsin,
    'arctan': numpy.arctan,
    'arcsec': functions.arcsec,
    'arccsc': functions.arccsc,
    'arccot': functions.arccot,
    'abs': numpy.abs,
    'fact': math.factorial,
    'factorial': math.factorial,
    'sinh': numpy.sinh,
    'cosh': numpy.cosh,
    'tanh': numpy.tanh,
    'sech': functions.sech,
    'csch': functions.csch,
    'coth': functions.coth,
    'arcsinh': numpy.arcsinh,
    'arccosh': numpy.arccosh,
    'arctanh': numpy.arctanh,
    'arcsech': functions.arcsech,
    'arccsch': functions.arccsch,
    'arccoth': functions.arccoth
}

# Built-in constants available in every expression.  Note `i`/`j` are the
# imaginary unit.  (NOTE(review): numpy.complex is deprecated in modern
# numpy; the plain builtin `complex` is equivalent here.)
DEFAULT_VARIABLES = {
    'i': numpy.complex(0, 1),
    'j': numpy.complex(0, 1),
    'e': numpy.e,
    'pi': numpy.pi,
    'k': scipy.constants.k,  # Boltzmann: 1.3806488e-23 (Joules/Kelvin)
    'c': scipy.constants.c,  # Light Speed: 2.998e8 (m/s)
    'T': 298.15,  # Typical room temperature: 298.15 (Kelvin), same as 25C/77F
    'q': scipy.constants.e  # Fund. Charge: 1.602176565e-19 (Coulombs)
}

# We eliminated the following extreme suffixes:
#   P (1e15), E (1e18), Z (1e21), Y (1e24),
#   f (1e-15), a (1e-18), z (1e-21), y (1e-24)
# since they're rarely used, and potentially confusing.
# They may also conflict with variables if we ever allow e.g.
#   5R instead of 5*R
SUFFIXES = {
    '%': 0.01, 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
    'c': 1e-2, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12
}
class UndefinedVariable(Exception):
    """Raised when a student's expression uses a variable that was not expected."""
    pass
def lower_dict(input_dict):
    """
    Convert all keys in a dictionary to lowercase; keep their original values.

    Keep in mind that it is possible (but not useful?) to define different
    variables that have the same lowercase representation. It would be hard to
    tell which is used in the final dict and which isn't.
    """
    # `items()` instead of the Python-2-only `iteritems()`: works identically
    # on both Python 2 and 3, and the comprehension consumes it eagerly anyway.
    return {k.lower(): v for k, v in input_dict.items()}
# The following few functions define evaluation actions, which are run on lists
# of results from each parse component. They convert the strings and (previously
# calculated) numbers into the number that component represents.
def super_float(text):
    """
    Like ``float``, but understands SI suffixes: ``"1k"`` -> 1000.
    """
    suffix = text[-1]
    if suffix in SUFFIXES:
        return float(text[:-1]) * SUFFIXES[suffix]
    return float(text)
def eval_number(parse_result):
    """
    Assemble a float from its string pieces, e.g. ['7.13', 'e', '3'] -> 7130.

    Delegates the actual conversion (including SI suffixes) to `super_float`.
    """
    joined = "".join(parse_result)
    return super_float(joined)
def eval_atom(parse_result):
    """
    Return the numeric value wrapped by the atom; parentheses are ignored.
    """
    # The atom's tokens may include '(' / ')' strings -- take the first
    # token that is an actual number.
    numeric_tokens = (tok for tok in parse_result
                      if isinstance(tok, numbers.Number))
    return next(numeric_tokens)
def eval_power(parse_result):
    """
    Take a list of numbers and exponentiate them, right to left.

    e.g. [ 2, 3, 2 ] -> 2^3^2 = 2^(3^2) -> 512
    (not to be interpreted (2^3)^2 = 64)
    """
    # Ignore the '^' marker tokens; keep only the numeric operands.
    operands = [k for k in parse_result if isinstance(k, numbers.Number)]
    # Exponentiation is right-associative, so fold from the rightmost operand
    # leftward.  An explicit loop replaces the bare `reduce` builtin, which
    # is Python-2-only (Python 3 moved it to functools) and harder to read.
    power = operands[-1]
    for base in reversed(operands[:-1]):
        power = base ** power
    return power
def eval_parallel(parse_result):
    """
    Combine numbers with the parallel-resistors operator (commutative):

        out = 1 / (1/in1 + 1/in2 + ...)

    e.g. [ 1, 2 ] -> 2/3.  Returns NaN if any input is zero.
    """
    if len(parse_result) == 1:
        return parse_result[0]
    # A zero input would divide by zero; match the documented NaN result.
    if 0 in parse_result:
        return float('nan')
    total = 0.
    for token in parse_result:
        # Skip the '||' operator markers.
        if isinstance(token, numbers.Number):
            total += 1. / token
    return 1. / total
def eval_sum(parse_result):
    """
    Add the inputs, honouring the interleaved '+'/'-' signs.

    [ 1, '+', 2, '-', 3 ] -> 0.  A leading + or - is allowed.
    """
    total = 0.0
    sign = 1.0
    for token in parse_result:
        if token == '+':
            sign = 1.0
        elif token == '-':
            sign = -1.0
        else:
            total = total + sign * token
    return total
def eval_product(parse_result):
    """
    Multiply/divide the inputs left to right.

    [ 1, '*', 2, '/', 3 ] -> 0.66
    """
    # Map operator tokens to the operation applied to the NEXT operand.
    ops = {'*': operator.mul, '/': operator.truediv}
    result = 1.0
    pending = operator.mul
    for token in parse_result:
        if token == '*' or token == '/':
            pending = ops[token]
        else:
            result = pending(result, token)
    return result
def add_defaults(variables, functions, case_sensitive):
    """
    Combine the caller's variables/functions with the built-in defaults.

    Returns a (variables, functions) tuple; user entries override defaults.
    When `case_sensitive` is False, every key is lowercased.
    """
    all_variables = dict(DEFAULT_VARIABLES)
    all_variables.update(variables)

    all_functions = dict(DEFAULT_FUNCTIONS)
    all_functions.update(functions)

    if case_sensitive:
        return (all_variables, all_functions)
    return (lower_dict(all_variables), lower_dict(all_functions))
def evaluator(variables, functions, math_expr, case_sensitive=False):
    """
    Evaluate an expression; that is, take a string of math and return a float.

    -Variables are passed as a dictionary from string to value. They must be
     python numbers.
    -Unary functions are passed as a dictionary from string to function.
    """
    # An empty expression evaluates to NaN rather than raising.
    if math_expr.strip() == "":
        return float('nan')

    # Build the parse tree.
    interpreter = ParseAugmenter(math_expr, case_sensitive)
    interpreter.parse_algebra()

    # Merge user-supplied names with the defaults, then validate the
    # expression only refers to known names.
    all_variables, all_functions = add_defaults(variables, functions, case_sensitive)
    interpreter.check_variables(all_variables, all_functions)

    if case_sensitive:
        casify = lambda x: x
    else:
        casify = lambda x: x.lower()  # Lowercase for case insens.

    # One handler per node type; reduce_tree folds the tree bottom-up.
    actions = {
        'number': eval_number,
        'variable': lambda x: all_variables[casify(x[0])],
        'function': lambda x: all_functions[casify(x[0])](x[1]),
        'atom': eval_atom,
        'power': eval_power,
        'parallel': eval_parallel,
        'product': eval_product,
        'sum': eval_sum
    }
    return interpreter.reduce_tree(actions)
class ParseAugmenter(object):
    """
    Holds the data for a particular parse.

    Retains the `math_expr` and `case_sensitive` so they needn't be passed
    around method to method.

    Eventually holds the parse tree and sets of variables as well.
    """
    def __init__(self, math_expr, case_sensitive=False):
        """
        Create the ParseAugmenter for a given math expression string.

        Do the parsing later, when called like `OBJ.parse_algebra()`.
        """
        self.case_sensitive = case_sensitive
        self.math_expr = math_expr
        self.tree = None
        # Names seen during parsing; filled in by the parse actions below
        # and validated later by check_variables().
        self.variables_used = set()
        self.functions_used = set()

        def vpa(tokens):
            """
            When a variable is recognized, store it in `variables_used`.
            """
            varname = tokens[0][0]
            self.variables_used.add(varname)

        def fpa(tokens):
            """
            When a function is recognized, store it in `functions_used`.
            """
            varname = tokens[0][0]
            self.functions_used.add(varname)

        # Closures over self, attached to the pyparsing grammar in
        # parse_algebra() via setParseAction.
        self.variable_parse_action = vpa
        self.function_parse_action = fpa

    def parse_algebra(self):
        """
        Parse an algebraic expression into a tree.

        Store a `pyparsing.ParseResult` in `self.tree` with proper groupings to
        reflect parenthesis and order of operations. Leave all operators in the
        tree and do not parse any strings of numbers into their float versions.

        Adding the groups and result names makes the `repr()` of the result
        really gross. For debugging, use something like
          print OBJ.tree.asXML()
        """
        # 0.33 or 7 or .34 or 16.
        number_part = Word(nums)
        inner_number = (number_part + Optional("." + Optional(number_part))) | ("." + number_part)
        # pyparsing allows spaces between tokens--`Combine` prevents that.
        inner_number = Combine(inner_number)

        # SI suffixes and percent.
        number_suffix = MatchFirst(Literal(k) for k in SUFFIXES.keys())

        # 0.33k or 17
        plus_minus = Literal('+') | Literal('-')
        number = Group(
            Optional(plus_minus) +
            inner_number +
            Optional(CaselessLiteral("E") + Optional(plus_minus) + number_part) +
            Optional(number_suffix)
        )
        number = number("number")

        # Predefine recursive variables.
        expr = Forward()

        # Handle variables passed in. They must start with letters/underscores
        # and may contain numbers afterward.
        inner_varname = Word(alphas + "_", alphanums + "_")
        varname = Group(inner_varname)("variable")
        varname.setParseAction(self.variable_parse_action)

        # Same thing for functions.
        function = Group(inner_varname + Suppress("(") + expr + Suppress(")"))("function")
        function.setParseAction(self.function_parse_action)

        atom = number | function | varname | "(" + expr + ")"
        atom = Group(atom)("atom")

        # Do the following in the correct order to preserve order of operation.
        pow_term = atom + ZeroOrMore("^" + atom)
        pow_term = Group(pow_term)("power")

        par_term = pow_term + ZeroOrMore('||' + pow_term)  # 5k || 4k
        par_term = Group(par_term)("parallel")

        prod_term = par_term + ZeroOrMore((Literal('*') | Literal('/')) + par_term)  # 7 * 5 / 4
        prod_term = Group(prod_term)("product")

        sum_term = Optional(plus_minus) + prod_term + ZeroOrMore(plus_minus + prod_term)  # -5 + 4 - 3
        sum_term = Group(sum_term)("sum")

        # Finish the recursion.
        expr << sum_term  # pylint: disable=pointless-statement
        # stringEnd forces the whole input to match -- no trailing junk.
        self.tree = (expr + stringEnd).parseString(self.math_expr)[0]

    def reduce_tree(self, handle_actions, terminal_converter=None):
        """
        Call `handle_actions` recursively on `self.tree` and return result.

        `handle_actions` is a dictionary of node names (e.g. 'product', 'sum',
        etc&) to functions. These functions are of the following form:
         -input: a list of processed child nodes. If it includes any terminal
          nodes in the list, they will be given as their processed forms also.
         -output: whatever to be passed to the level higher, and what to
          return for the final node.

        `terminal_converter` is a function that takes in a token and returns a
        processed form. The default of `None` just leaves them as strings.
        """
        def handle_node(node):
            """
            Return the result representing the node, using recursion.

            Call the appropriate `handle_action` for this node. As its inputs,
            feed it the output of `handle_node` for each child node.
            """
            if not isinstance(node, ParseResults):
                # Then treat it as a terminal node.
                if terminal_converter is None:
                    return node
                else:
                    return terminal_converter(node)

            node_name = node.getName()
            if node_name not in handle_actions:  # pragma: no cover
                raise Exception(u"Unknown branch name '{}'".format(node_name))

            action = handle_actions[node_name]
            handled_kids = [handle_node(k) for k in node]
            return action(handled_kids)

        # Find the value of the entire tree.
        return handle_node(self.tree)

    def check_variables(self, valid_variables, valid_functions):
        """
        Confirm that all the variables used in the tree are valid/defined.

        Otherwise, raise an UndefinedVariable containing all bad variables.
        """
        if self.case_sensitive:
            casify = lambda x: x
        else:
            casify = lambda x: x.lower()  # Lowercase for case insens.

        # Test if casify(X) is valid, but return the actual bad input (i.e. X)
        bad_vars = set(var for var in self.variables_used
                       if casify(var) not in valid_variables)
        bad_vars.update(func for func in self.functions_used
                        if casify(func) not in valid_functions)

        if bad_vars:
            raise UndefinedVariable(' '.join(sorted(bad_vars)))
| agpl-3.0 |
tiradani/gsh | osg_gsh/process_management.py | 1 | 3017 | import subprocess
"""
This is a blatant copy of the convenience functions contained in the subprocess
module in python 2.6+. The reason for this is to present the exact same call
capability on systems with python 2.4. (e.g RHEL5/CentOS5/SL5)
"""
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised by check_call()/check_output() when a process exits non-zero.

    The exit status is stored in the `returncode` attribute; check_output()
    additionally stores the captured stdout in the `output` attribute.
    """
    def __init__(self, returncode, cmd, output=None):
        self.cmd = cmd
        self.returncode = returncode
        self.output = output

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def call(*popenargs, **kwargs):
    """Run a command, wait for it to complete, and return its exit code.

    The arguments are the same as for the Popen constructor. Example:

        retcode = call(["ls", "-l"])
    """
    process = subprocess.Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run a command and wait for it; return 0 on success.

    Raises CalledProcessError (carrying the exit code in `returncode`) if
    the process exits non-zero.

    The arguments are the same as for the Popen constructor. Example:

        check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    if not retcode:
        return 0
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(retcode, cmd)
def check_output(*popenargs, **kwargs):
    """Run a command and return its standard output as a byte string.

    On a non-zero exit code, raises CalledProcessError with the return code
    in `returncode` and the captured output in `output`.

    The arguments are the same as for the Popen constructor, except that
    `stdout` is not allowed (it is used internally). To capture standard
    error in the result, pass stderr=STDOUT. Examples:

        >>> check_output(["ls", "-l", "/dev/null"])
        'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

        >>> check_output(["/bin/sh", "-c",
        ...               "ls -l non_existent_file ; exit 0"],
        ...              stderr=STDOUT)
        'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')

    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, _ = process.communicate()
    retcode = process.poll()
    if not retcode:
        return output

    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(retcode, cmd, output=output)
| bsd-3-clause |
NEricN/RobotCSimulator | Python/App/Lib/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py | 949 | 19104 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
# Number of frequency categories used in the jp2CharContext table below
# (cell values are 0-5; category 0 means the pair was essentially never seen).
NUM_OF_CATEGORY = 6
# Sentinel confidence returned by get_confidence() before enough data is seen.
DONT_KNOW = -1
# Number of relevant 2-char sequences required before got_enough_data() is True.
ENOUGH_REL_THRESHOLD = 100
# Hard cap on analysed sequences; feed() stops collecting once exceeded.
MAX_REL_THRESHOLD = 1000
# Minimum sequences required before get_confidence() returns a real ratio.
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """Scores how "Japanese-like" a byte stream is by tallying the frequency
    category (looked up in jp2CharContext) of each consecutive hiragana pair.

    Subclasses override get_order() to decode one character of a concrete
    encoding (Shift_JIS, EUC-JP) and map hiragana characters to table rows.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self._mTotalRel = 0  # total sequences received
        # category counters, each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False

    def feed(self, aBuf, aLen):
        """Consume aLen bytes of aBuf, updating the pair-category counters."""
        if self._mDone:
            return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. In case the last one or two bytes of the previous
        # buffer were not a complete character, we recorded how many bytes
        # are needed to complete it and skip those bytes here. We could
        # instead record the bytes and analyse the character once complete,
        # but one character makes little difference, and skipping it keeps
        # the logic simple and improves performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # Character straddles the buffer boundary: remember how much
                # of it to skip at the start of the next feed() call.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order

    def got_enough_data(self):
        return self._mTotalRel > ENOUGH_REL_THRESHOLD

    def get_confidence(self):
        """Return the fraction of analysed pairs in a non-zero frequency
        category, or DONT_KNOW when too little data has been collected.
        """
        # This is just one way to calculate confidence. It works well for me.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            # BUG FIX: force true division. Without a __future__ division
            # import, the original int/int expression truncated to 0 under
            # Python 2, making the confidence useless (PEP 238).
            return float(self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW

    def get_order(self, aBuf):
        # Base class cannot decode anything: no hiragana order, 1-byte char.
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Shift_JIS flavour of the Japanese context analyser."""

    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the character starting
        aBuf; order is -1 when the character is not hiragana.
        """
        if not aBuf:
            return -1, 1
        lead = wrap_ord(aBuf[0])
        # Lead bytes 0x81-0x9F and 0xE0-0xFC introduce a 2-byte character.
        is_double = (0x81 <= lead <= 0x9F) or (0xE0 <= lead <= 0xFC)
        width = 2 if is_double else 1
        # NOTE(review): comparing the lead byte against 202 (0xCA) looks
        # suspicious -- the hiragana lead byte in Shift_JIS is normally
        # 0x82 -- but this matches the upstream code, so it is preserved
        # as-is. TODO confirm against upstream chardet.
        if len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if lead == 202 and 0x9F <= trail <= 0xF1:
                return trail - 0x9F, width
        return -1, width
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """EUC-JP flavour of the Japanese context analyser."""

    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the character starting
        aBuf; order is -1 when the character is not hiragana.
        """
        if not aBuf:
            return -1, 1
        lead = wrap_ord(aBuf[0])
        # 0x8E and 0xA1-0xFE introduce a 2-byte character; 0x8F a 3-byte one.
        if lead == 0x8E or 0xA1 <= lead <= 0xFE:
            width = 2
        elif lead == 0x8F:
            width = 3
        else:
            width = 1
        # Hiragana characters are 0xA4 followed by a trail byte in 0xA1-0xF3.
        if len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if lead == 0xA4 and 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, width
        return -1, width
# flake8: noqa
| apache-2.0 |
kliput/onezone-gui | bamboos/docker/swift_up.py | 2 | 1121 | #!/usr/bin/env python
# coding=utf-8
"""Authors: Michal Wrona
Copyright (C) 2016 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
A script that brings up a Swift storage.
Run the script with -h flag to learn about script's running options.
"""
from __future__ import print_function
import argparse
import json
from environment import common, swift
# Command-line interface; defaults are shown in --help via the formatter.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='Bring up Swift storage.')
# Docker image used to run the Swift container(s).
parser.add_argument(
    '-i', '--image',
    action='store',
    default='onedata/dockswift',
    help='docker image to use for the container',
    dest='image')
# May be given multiple times; each occurrence appends one container name.
parser.add_argument(
    '-c', '--container',
    action='append',
    default=[],
    help='container name',
    dest='containers')
# NOTE: the default uid is computed when this module runs (at add_argument
# time), so each invocation gets a fresh generated value unless -u is given.
parser.add_argument(
    '-u', '--uid',
    action='store',
    default=common.generate_uid(),
    help='uid that will be concatenated to docker names',
    dest='uid')
args = parser.parse_args()
# Bring up the storage and print its configuration as JSON on stdout.
config = swift.up(args.image, args.containers, 'storage', args.uid)
print(json.dumps(config))
| mit |
harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/cloudsearch/test_connection.py | 12 | 8523 | #!/usr/bin env python
from tests.unit import AWSMockServiceTestCase
from boto.cloudsearch.domain import Domain
from boto.cloudsearch.layer1 import Layer1
import json
class TestCloudSearchCreateDomain(AWSMockServiceTestCase):
    """Tests for Layer1.create_domain and the Domain wrapper that is built
    from its parsed response.
    """
    connection_class = Layer1
    def default_body(self):
        # Canned CreateDomain XML response served by the mocked connection.
        return """
        <CreateDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
          <CreateDomainResult>
            <DomainStatus>
              <SearchPartitionCount>0</SearchPartitionCount>
              <SearchService>
                <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
                <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
              </SearchService>
              <NumSearchableDocs>0</NumSearchableDocs>
              <Created>true</Created>
              <DomainId>1234567890/demo</DomainId>
              <Processing>false</Processing>
              <SearchInstanceCount>0</SearchInstanceCount>
              <DomainName>demo</DomainName>
              <RequiresIndexDocuments>false</RequiresIndexDocuments>
              <Deleted>false</Deleted>
              <DocService>
                <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
                <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
              </DocService>
            </DomainStatus>
          </CreateDomainResult>
          <ResponseMetadata>
            <RequestId>00000000-0000-0000-0000-000000000000</RequestId>
          </ResponseMetadata>
        </CreateDomainResponse>
        """
    def test_create_domain(self):
        """Check the request parameters sent for a CreateDomain call."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_domain('demo')
        self.assert_request_parameters({
            'Action': 'CreateDomain',
            'DomainName': 'demo',
            'Version': '2011-02-01',
        })
    def test_cloudsearch_connect_result_endpoints(self):
        """Check that endpoints & ARNs are correctly returned from AWS"""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_domain('demo')
        domain = Domain(self, api_response)
        self.assertEqual(domain.doc_service_arn,
                         "arn:aws:cs:us-east-1:1234567890:doc/demo")
        self.assertEqual(
            domain.doc_service_endpoint,
            "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
        self.assertEqual(domain.search_service_arn,
                         "arn:aws:cs:us-east-1:1234567890:search/demo")
        self.assertEqual(
            domain.search_service_endpoint,
            "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
    def test_cloudsearch_connect_result_statuses(self):
        """Check that domain statuses are correctly returned from AWS"""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_domain('demo')
        domain = Domain(self, api_response)
        self.assertEqual(domain.created, True)
        self.assertEqual(domain.processing, False)
        self.assertEqual(domain.requires_index_documents, False)
        self.assertEqual(domain.deleted, False)
    def test_cloudsearch_connect_result_details(self):
        """Check that the domain information is correctly returned from AWS"""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_domain('demo')
        domain = Domain(self, api_response)
        self.assertEqual(domain.id, "1234567890/demo")
        self.assertEqual(domain.name, "demo")
    def test_cloudsearch_documentservice_creation(self):
        """Check the document service endpoint comes from the DocService node."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_domain('demo')
        domain = Domain(self, api_response)
        document = domain.get_document_service()
        self.assertEqual(
            document.endpoint,
            "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
    def test_cloudsearch_searchservice_creation(self):
        """Check the search service endpoint comes from the SearchService node."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_domain('demo')
        domain = Domain(self, api_response)
        search = domain.get_search_service()
        self.assertEqual(
            search.endpoint,
            "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase):
    """Tests for Layer1.delete_domain request construction."""
    connection_class = Layer1
    def default_body(self):
        # Canned DeleteDomain XML response served by the mocked connection.
        return """
        <DeleteDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
          <DeleteDomainResult>
            <DomainStatus>
              <SearchPartitionCount>0</SearchPartitionCount>
              <SearchService>
                <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
                <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
              </SearchService>
              <NumSearchableDocs>0</NumSearchableDocs>
              <Created>true</Created>
              <DomainId>1234567890/demo</DomainId>
              <Processing>false</Processing>
              <SearchInstanceCount>0</SearchInstanceCount>
              <DomainName>demo</DomainName>
              <RequiresIndexDocuments>false</RequiresIndexDocuments>
              <Deleted>false</Deleted>
              <DocService>
                <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
                <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
              </DocService>
            </DomainStatus>
          </DeleteDomainResult>
          <ResponseMetadata>
            <RequestId>00000000-0000-0000-0000-000000000000</RequestId>
          </ResponseMetadata>
        </DeleteDomainResponse>
        """
    def test_cloudsearch_deletion(self):
        """
        Check that the correct arguments are sent to AWS when deleting a
        cloudsearch domain.
        """
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_domain('demo')
        self.assert_request_parameters({
            'Action': 'DeleteDomain',
            'DomainName': 'demo',
            'Version': '2011-02-01',
        })
class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase):
    """Tests for Layer1.index_documents request and response handling."""
    connection_class = Layer1
    def default_body(self):
        # Canned IndexDocuments XML response served by the mocked connection.
        return """
        <IndexDocumentsResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
          <IndexDocumentsResult>
            <FieldNames>
              <member>average_score</member>
              <member>brand_id</member>
              <member>colors</member>
              <member>context</member>
              <member>context_owner</member>
              <member>created_at</member>
              <member>creator_id</member>
              <member>description</member>
              <member>file_size</member>
              <member>format</member>
              <member>has_logo</member>
              <member>has_messaging</member>
              <member>height</member>
              <member>image_id</member>
              <member>ingested_from</member>
              <member>is_advertising</member>
              <member>is_photo</member>
              <member>is_reviewed</member>
              <member>modified_at</member>
              <member>subject_date</member>
              <member>tags</member>
              <member>title</member>
              <member>width</member>
            </FieldNames>
          </IndexDocumentsResult>
          <ResponseMetadata>
            <RequestId>eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a</RequestId>
          </ResponseMetadata>
        </IndexDocumentsResponse>
        """
    def test_cloudsearch_index_documents(self):
        """
        Check that the correct arguments are sent to AWS when indexing a
        domain.
        """
        self.set_http_response(status_code=200)
        api_response = self.service_connection.index_documents('demo')
        self.assert_request_parameters({
            'Action': 'IndexDocuments',
            'DomainName': 'demo',
            'Version': '2011-02-01',
        })
    def test_cloudsearch_index_documents_resp(self):
        """
        Check that the AWS response is being parsed correctly when indexing a
        domain.
        """
        self.set_http_response(status_code=200)
        api_response = self.service_connection.index_documents('demo')
        self.assertEqual(api_response, ['average_score', 'brand_id', 'colors',
                                        'context', 'context_owner',
                                        'created_at', 'creator_id',
                                        'description', 'file_size', 'format',
                                        'has_logo', 'has_messaging', 'height',
                                        'image_id', 'ingested_from',
                                        'is_advertising', 'is_photo',
                                        'is_reviewed', 'modified_at',
                                        'subject_date', 'tags', 'title',
                                        'width'])
| gpl-3.0 |
pidydx/grr | grr/lib/master.py | 2 | 1810 | #!/usr/bin/env python
"""The master watcher class.
It often makes sense to have a backup instance of the GRR server
environment running. If you decide to do so, override this class with
functionality to determine if this instance is currently active
("Master") or not and store the result using the SetMaster
function. Note that you can have multiple Workers and
Frontend Servers running without any problems as long as you don't use
data store replication. Only if you work on a replicated database you
will run into race conditions and have to disable the backup instances.
"""
import logging
from grr.lib import config_lib
from grr.lib import registry
from grr.lib import stats
class DefaultMasterWatcher(object):
  """A master watcher that always reports this instance as active."""

  __metaclass__ = registry.MetaclassRegistry

  is_master = True

  def __init__(self):
    super(DefaultMasterWatcher, self).__init__()
    self.SetMaster(True)

  def IsMaster(self):
    return self.is_master

  def SetMaster(self, master=True):
    """Switch the is_master stat variable."""
    if master:
      message = "data center is now active."
    else:
      message = "data center became inactive."
    logging.info(message)
    stats.STATS.SetGaugeValue("is_master", 1 if master else 0)
    self.is_master = bool(master)
# Module-level singleton; populated by MasterInit.RunOnce() below.
MASTER_WATCHER = None
class MasterInit(registry.InitHook):
  """Init hook class for the master watcher."""
  def RunOnce(self):
    # Register the gauge first, then instantiate the configured watcher
    # class (looked up by name among DefaultMasterWatcher subclasses).
    # stat is set to 0 at registration time.
    stats.STATS.RegisterGaugeMetric("is_master", int)
    global MASTER_WATCHER  # pylint: disable=global-statement
    watcher_name = config_lib.CONFIG["Server.master_watcher_class"]
    watcher_cls = DefaultMasterWatcher.classes[watcher_name]
    MASTER_WATCHER = watcher_cls()
| apache-2.0 |
jrabbit/ubotu-fr | plugins/Utilities/config.py | 17 | 2370 | ###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    """Called by supybot's setup wizard to configure this module.

    'advanced' is a bool that specifies whether the user identified himself
    as an advanced user or not; this plugin asks no extra questions, so it
    is unused. You should effect your configuration by manipulating the
    registry as appropriate.
    """
    # The supybot.questions helpers (expect, anything, something, yn) that
    # the plugin template imports here were never used, so the unused
    # function-local import has been removed.
    conf.registerPlugin('Utilities', True)
# registerPlugin is called again here (without the second argument) to
# obtain the plugin's config group object for attaching variables below.
Utilities = conf.registerPlugin('Utilities')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Utilities, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
jtrobec/pants | src/python/pants/bin/extension_loader.py | 1 | 6479 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import importlib
import traceback
from pkg_resources import Requirement
from twitter.common.collections import OrderedSet
from pants.base.exceptions import BackendConfigurationError
from pants.build_graph.build_configuration import BuildConfiguration
# Base class for all plugin-loading failures raised by this module.
class PluginLoadingError(Exception): pass
# Raised when a named plugin cannot be found in the working set.
class PluginNotFound(PluginLoadingError): pass
# Raised when a plugin's declared load_after dependency was not loaded first.
class PluginLoadOrderError(PluginLoadingError): pass
def load_plugins_and_backends(plugins, working_set, backends):
  """Build a BuildConfiguration from named plugins plus source backends.

  :param list<str> plugins: Plugins to load (see `load_plugins`).
  :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from.
  :param list<str> backends: Source backends to load (see
    `load_build_configuration_from_source`).
  """
  configuration = BuildConfiguration()
  load_plugins(configuration, plugins or [], working_set)
  load_build_configuration_from_source(
      configuration, additional_backends=backends or [])
  return configuration
def load_plugins(build_configuration, plugins, working_set):
  """Load named plugins from the current working_set into the supplied build_configuration

  "Loading" a plugin here refers to calling registration methods -- it is assumed each plugin
  is already on the path and an error will be thrown if it is not. Plugins should define their
  entrypoints in the `pantsbuild.plugin` group when configuring their distribution.

  Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods
  are called if those entry points are defined.

  * Plugins are loaded in the order they are provided. *

  This is important as loading can add, remove or replace exiting tasks installed by other plugins.

  If a plugin needs to assert that another plugin is registered before it, it can define an
  entrypoint "load_after" which can return a list of plugins which must have been loaded before it
  can be loaded. This does not change the order or what plugins are loaded in any way -- it is
  purely an assertion to guard against misconfiguration.

  :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
  :param list<str> plugins: A list of plugin names optionally with versions, in requirement format.
                            eg ['widgetpublish', 'widgetgen==1.2'].
  :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from.
  :raises PluginNotFound: if a named plugin is not present in the working set.
  :raises PluginLoadOrderError: if a plugin's load_after assertion is violated.
  """
  # Maps requirement key -> Distribution in load order; consulted when a
  # later plugin asserts "load_after" on an earlier one.
  loaded = {}
  for plugin in plugins:
    req = Requirement.parse(plugin)
    dist = working_set.find(req)
    if not dist:
      raise PluginNotFound('Could not find plugin: {}'.format(req))
    # Entry points the distribution registered under the pantsbuild.plugin group.
    entries = dist.get_entry_map().get('pantsbuild.plugin', {})
    if 'load_after' in entries:
      # load_after is assertion-only: each named plugin must already be loaded.
      deps = entries['load_after'].load()()
      for dep_name in deps:
        dep = Requirement.parse(dep_name)
        if dep.key not in loaded:
          raise PluginLoadOrderError('Plugin {0} must be loaded after {1}'.format(plugin, dep))
    if 'build_file_aliases' in entries:
      # Zero-arg callable returning aliases to expose to BUILD files.
      aliases = entries['build_file_aliases'].load()()
      build_configuration.register_aliases(aliases)
    if 'register_goals' in entries:
      # Registers goals purely via side effects of the call.
      entries['register_goals'].load()()
    if 'global_subsystems' in entries:
      # Zero-arg callable returning subsystem types to register globally.
      subsystems = entries['global_subsystems'].load()()
      build_configuration.register_subsystems(subsystems)
    loaded[dist.as_requirement().key] = dist
def load_build_configuration_from_source(build_configuration, additional_backends=None):
  """Installs pants backend packages to provide targets and helper functions to BUILD files and
  goals to the cli.

  :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
  :param additional_backends: An optional list of additional packages to load backends from.
  :raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
    the build configuration.
  """
  # Note: pants.core_tasks must be first in this list, as it registers various
  # stubs that other tasks can use for scheduling against.
  # TODO: Allow repos to opt in to any backend (but not to core_tasks, which
  # must always be loaded).
  backend_packages = [
      'pants.core_tasks',
      'pants.backend.authentication',
      'pants.backend.core',
      'pants.backend.python',
      'pants.backend.jvm',
      'pants.backend.codegen',
      'pants.backend.project_info',
  ]
  requested = backend_packages + list(additional_backends or [])
  # OrderedSet preserves first-seen order while dropping duplicates.
  for pkg in OrderedSet(requested):
    load_backend(build_configuration, pkg)
def load_backend(build_configuration, backend_package):
  """Installs the given backend package into the build configuration.

  :param build_configuration the :class:``pants.build_graph.build_configuration.BuildConfiguration`` to
    install the backend plugin into.
  :param string backend_package: the package name containing the backend plugin register module that
    provides the plugin entrypoints.
  :raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
    the build configuration."""
  # By convention a backend exposes its registration hooks in a `register`
  # submodule, e.g. pants.backend.python.register.
  backend_module = backend_package + '.register'
  try:
    module = importlib.import_module(backend_module)
  except ImportError as e:
    traceback.print_exc()
    raise BackendConfigurationError('Failed to load the {backend} backend: {error}'
                                    .format(backend=backend_module, error=e))

  def invoke_entrypoint(name):
    # Look up an optional zero-arg registration function on the module; the
    # no-op lambda default makes every entrypoint optional.
    entrypoint = getattr(module, name, lambda: None)
    try:
      return entrypoint()
    except TypeError as e:
      # A TypeError here means the hook exists but has the wrong arity.
      traceback.print_exc()
      raise BackendConfigurationError(
          'Entrypoint {entrypoint} in {backend} must be a zero-arg callable: {error}'
          .format(entrypoint=name, backend=backend_module, error=e))

  # Alias and subsystem registration consume the hooks' return values;
  # register_goals registers purely through side effects of the call.
  build_file_aliases = invoke_entrypoint('build_file_aliases')
  if build_file_aliases:
    build_configuration.register_aliases(build_file_aliases)
  subsystems = invoke_entrypoint('global_subsystems')
  if subsystems:
    build_configuration.register_subsystems(subsystems)
  invoke_entrypoint('register_goals')
| apache-2.0 |
newswangerd/ansible | test/units/module_utils/facts/other/test_facter.py | 35 | 8062 | # unit tests for ansible other facter fact collector
# -*- coding: utf-8 -*-
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import Mock, patch
from .. base import BaseFactsTest
from ansible.module_utils.facts.other.facter import FacterFactCollector
# Canned `facter --json` output used as the mocked command result in the tests
# below. It must be strictly valid JSON: the facter fact collector feeds it to
# json.loads(), and any stray text would make parsing fail silently (the
# original copy carried a literal "# noqa" marker inside the string, which
# rendered the whole payload unparseable -- removed here).
facter_json_output = '''
{
    "operatingsystemmajrelease": "25",
    "hardwareisa": "x86_64",
    "kernel": "Linux",
    "path": "/home/testuser/src/ansible/bin:/home/testuser/perl5/bin:/home/testuser/perl5/bin:/home/testuser/bin:/home/testuser/.local/bin:/home/testuser/pythons/bin:/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/testuser/.cabal/bin:/home/testuser/gopath/bin:/home/testuser/.rvm/bin",
    "memorysize": "15.36 GB",
    "memoryfree": "4.88 GB",
    "swapsize": "7.70 GB",
    "swapfree": "6.75 GB",
    "swapsize_mb": "7880.00",
    "swapfree_mb": "6911.41",
    "memorysize_mb": "15732.95",
    "memoryfree_mb": "4997.68",
    "lsbmajdistrelease": "25",
    "macaddress": "02:42:ea:15:d8:84",
    "id": "testuser",
    "domain": "example.com",
    "augeasversion": "1.7.0",
    "os": {
        "name": "Fedora",
        "family": "RedHat",
        "release": {
            "major": "25",
            "full": "25"
        },
        "lsb": {
            "distcodename": "TwentyFive",
            "distid": "Fedora",
            "distdescription": "Fedora release 25 (Twenty Five)",
            "release": ":core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch",
            "distrelease": "25",
            "majdistrelease": "25"
        }
    },
    "processors": {
        "models": [
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
            "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz"
        ],
        "count": 8,
        "physicalcount": 1
    },
    "architecture": "x86_64",
    "hardwaremodel": "x86_64",
    "operatingsystem": "Fedora",
    "processor0": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor1": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor2": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor3": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor4": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor5": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor6": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processor7": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
    "processorcount": 8,
    "uptime_seconds": 1558090,
    "fqdn": "myhostname.example.com",
    "rubyversion": "2.3.3",
    "gid": "testuser",
    "physicalprocessorcount": 1,
    "netmask": "255.255.0.0",
    "uniqueid": "a8c01301",
    "uptime_days": 18,
    "interfaces": "docker0,em1,lo,vethf20ff12,virbr0,virbr1,virbr0_nic,virbr1_nic,wlp4s0",
    "ipaddress_docker0": "172.17.0.1",
    "macaddress_docker0": "02:42:ea:15:d8:84",
    "netmask_docker0": "255.255.0.0",
    "mtu_docker0": 1500,
    "macaddress_em1": "3c:97:0e:e9:28:8e",
    "mtu_em1": 1500,
    "ipaddress_lo": "127.0.0.1",
    "netmask_lo": "255.0.0.0",
    "mtu_lo": 65536,
    "macaddress_vethf20ff12": "ae:6e:2b:1e:a1:31",
    "mtu_vethf20ff12": 1500,
    "ipaddress_virbr0": "192.168.137.1",
    "macaddress_virbr0": "52:54:00:ce:82:5e",
    "netmask_virbr0": "255.255.255.0",
    "mtu_virbr0": 1500,
    "ipaddress_virbr1": "192.168.121.1",
    "macaddress_virbr1": "52:54:00:b4:68:a9",
    "netmask_virbr1": "255.255.255.0",
    "mtu_virbr1": 1500,
    "macaddress_virbr0_nic": "52:54:00:ce:82:5e",
    "mtu_virbr0_nic": 1500,
    "macaddress_virbr1_nic": "52:54:00:b4:68:a9",
    "mtu_virbr1_nic": 1500,
    "ipaddress_wlp4s0": "192.168.1.19",
    "macaddress_wlp4s0": "5c:51:4f:e6:a8:e3",
    "netmask_wlp4s0": "255.255.255.0",
    "mtu_wlp4s0": 1500,
    "virtual": "physical",
    "is_virtual": false,
    "partitions": {
        "sda2": {
            "size": "499091456"
        },
        "sda1": {
            "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
            "size": "1024000",
            "mount": "/boot"
        }
    },
    "lsbdistcodename": "TwentyFive",
    "lsbrelease": ":core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch",
    "filesystems": "btrfs,ext2,ext3,ext4,xfs",
    "system_uptime": {
        "seconds": 1558090,
        "hours": 432,
        "days": 18,
        "uptime": "18 days"
    },
    "ipaddress": "172.17.0.1",
    "timezone": "EDT",
    "ps": "ps -ef",
    "rubyplatform": "x86_64-linux",
    "rubysitedir": "/usr/local/share/ruby/site_ruby",
    "uptime": "18 days",
    "lsbdistrelease": "25",
    "operatingsystemrelease": "25",
    "facterversion": "2.4.3",
    "kernelrelease": "4.9.14-200.fc25.x86_64",
    "lsbdistdescription": "Fedora release 25 (Twenty Five)",
    "network_docker0": "172.17.0.0",
    "network_lo": "127.0.0.0",
    "network_virbr0": "192.168.137.0",
    "network_virbr1": "192.168.121.0",
    "network_wlp4s0": "192.168.1.0",
    "lsbdistid": "Fedora",
    "selinux": true,
    "selinux_enforced": false,
    "selinux_policyversion": "30",
    "selinux_current_mode": "permissive",
    "selinux_config_mode": "permissive",
    "selinux_config_policy": "targeted",
    "hostname": "myhostname",
    "osfamily": "RedHat",
    "kernelmajversion": "4.9",
    "blockdevice_sr0_size": 1073741312,
    "blockdevice_sr0_vendor": "MATSHITA",
    "blockdevice_sr0_model": "DVD-RAM UJ8E2",
    "blockdevice_sda_size": 256060514304,
    "blockdevice_sda_vendor": "ATA",
    "blockdevice_sda_model": "SAMSUNG MZ7TD256",
    "blockdevices": "sda,sr0",
    "uptime_hours": 432,
    "kernelversion": "4.9.14"
}
'''
class TestFacterCollector(BaseFactsTest):
    """Tests for FacterFactCollector, driven by the canned facter JSON above."""

    __test__ = True
    gather_subset = ['!all', 'facter']
    valid_subsets = ['facter']
    fact_namespace = 'ansible_facter'
    collector_class = FacterFactCollector

    def _mock_module(self):
        # Fake AnsibleModule: pretends the facter binary exists and returns
        # the canned JSON payload with a zero exit code.
        mock_module = Mock()
        mock_module.params = {'gather_subset': self.gather_subset,
                              'gather_timeout': 10,
                              'filter': '*'}
        mock_module.get_bin_path = Mock(return_value='/not/actually/facter')
        mock_module.run_command = Mock(return_value=(0, facter_json_output, ''))
        return mock_module

    @patch('ansible.module_utils.facts.other.facter.FacterFactCollector.get_facter_output')
    def test_bogus_json(self, mock_get_facter_output):
        """Unparseable facter output must yield an empty facts dict, not an error."""
        module = self._mock_module()
        # bogus json
        mock_get_facter_output.return_value = '{'
        fact_collector = self.collector_class()
        facts_dict = fact_collector.collect(module=module)
        self.assertIsInstance(facts_dict, dict)
        self.assertEqual(facts_dict, {})

    @patch('ansible.module_utils.facts.other.facter.FacterFactCollector.run_facter')
    def test_facter_non_zero_return_code(self, mock_run_facter):
        """A failing facter invocation must be ignored entirely."""
        module = self._mock_module()
        # bogus json
        mock_run_facter.return_value = (1, '{}', '')
        fact_collector = self.collector_class()
        facts_dict = fact_collector.collect(module=module)
        self.assertIsInstance(facts_dict, dict)
        # This assumes no 'facter' entry at all is correct
        self.assertNotIn('facter', facts_dict)
        self.assertEqual(facts_dict, {})
| gpl-3.0 |
bbgw/Klampt | Python/control/robotinfo.py | 5 | 1875 | """Stores common semantic properties for robots."""
# Per-robot semantic properties, keyed by the name reported by
# robot.getName(). Joint/link values are model-specific indices.
props = {'hubo-II+': {'freeBase': True,
                      'numLegs': 2,
                      'feet': {'l': 56, 'r': 62},
                      'legsAndBase': [0, 1, 2, 3, 4, 5, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62],
                      'lowerBody': [50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62],
                      'legs': {'l': [51, 52, 53, 54, 55, 56], 'r': [57, 58, 59, 60, 61, 62]},
                      'numArms': 2,
                      'hands': [13, 34],
                      'arms': {'r': range(29, 35), 'l': range(8, 14)}
                      },
         'drchubo-v3': {'freeBase': True,
                        'numLegs': 2,
                        'feet': {'l': 52, 'r': 59},
                        'legsAndBase': [0, 1, 2, 3, 4, 5, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 59],
                        'lowerBody': [46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 59],
                        'legs': {'l': [47, 48, 49, 50, 51, 52], 'r': [54, 55, 56, 57, 58, 59]},
                        'numArms': 2,
                        'hands': [12, 32],
                        'arms': {'l': range(6, 13), 'r': range(26, 33)}
                        }
         }


def _robot_props(robot):
    """Internal: look up the property table for a robot by its name."""
    return props[robot.getName()]


def freeBase(robot):
    """Return True if the robot has a floating (free-flying) base."""
    return _robot_props(robot)['freeBase']


def numLegs(robot):
    """Return the number of legs the robot has."""
    return _robot_props(robot)['numLegs']


def feet(robot):
    """Return a dict mapping side ('l'/'r') to the foot link index."""
    return _robot_props(robot)['feet']


def legsAndBase(robot):
    """Return the joint indices of the legs plus the floating base."""
    return _robot_props(robot)['legsAndBase']


def lowerBody(robot):
    """Return the joint indices of the lower body."""
    return _robot_props(robot)['lowerBody']


def legs(robot):
    """Return a dict mapping side ('l'/'r') to that leg's joint indices."""
    return _robot_props(robot)['legs']


def leg(robot, index):
    """Return the joint indices of the leg on the given side ('l' or 'r')."""
    return _robot_props(robot)['legs'][index]


def numArms(robot):
    """Return the number of arms the robot has."""
    return _robot_props(robot)['numArms']


def hands(robot):
    """Return the list of hand link indices."""
    return _robot_props(robot)['hands']


def arms(robot, index=None):
    """Return a dict mapping side ('l'/'r') to that arm's joint indices.

    ``index`` is accepted for backward compatibility but ignored (the old
    signature required it without using it); use :func:`arm` to select one
    side.
    """
    return _robot_props(robot)['arms']


def arm(robot, index):
    """Return the joint indices of the arm on the given side ('l' or 'r')."""
    return _robot_props(robot)['arms'][index]
| bsd-3-clause |
Deepakkothandan/ansible | lib/ansible/plugins/callback/stderr.py | 59 | 3194 | # (c) 2017, Frederic Van Espen <github@freh.be>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: stderr
callback_type: stdout
requirements:
- set as main display callback
short_description: Splits output, sending failed tasks to stderr
version_added: "2.4"
extends_documentation_fragment:
- default_callback
description:
- This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
- Also it does not output skipped host/task/item status
'''
from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):

    '''
    This is the stderr callback plugin, which reuses the default
    callback plugin but sends error output to stderr.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'stderr'

    def __init__(self):
        # Keep a handle to the bound super() proxy so methods can defer to
        # the default callback implementation.
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # Mirrors the default callback's failure handler, but routes the
        # fatal messages and exception tracebacks to stderr instead of stdout.
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._clean_results(result._result, result._task.action)
        # NOTE(review): self._play and self._last_task_banner are presumably
        # populated by the inherited default callback during play/task events
        # -- confirm against CallbackModule_default.
        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        self._handle_exception(result._result, errors_to_stderr=True)
        self._handle_warnings(result._result)
        if result._task.loop and 'results' in result._result:
            # Loop tasks report per-item results instead of a single line.
            self._process_items(result)
        else:
            if delegated_vars:
                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
                                                                            self._dump_results(result._result)), color=C.COLOR_ERROR,
                                      stderr=True)
            else:
                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
                                      color=C.COLOR_ERROR, stderr=True)
        if ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)

    def _handle_exception(self, result, errors_to_stderr=False):
        # Overrides the default handler to optionally send the traceback text
        # to stderr (errors_to_stderr=True); also consumes the 'exception' key
        # so it is not printed twice.
        if 'exception' in result:
            msg = "An exception occurred during task execution. "
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result['exception'].strip().split('\n')[-1]
                msg += "To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "The full traceback is:\n" + result['exception']
            del result['exception']
            self._display.display(msg, color=C.COLOR_ERROR, stderr=errors_to_stderr)
| gpl-3.0 |
olu1987/lntnew | node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
  """Split a 'path/to/file.gyp:target#toolset' spec into its three parts.

  Missing pieces come back as empty strings."""
  remainder, _, toolset = target.partition('#')
  build_file, _, target_name = remainder.partition(':')
  return build_file, target_name, toolset
def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents.

  Fixes: |filename| was previously ignored in favor of a hard-coded
  'dump.json', and the file handle was opened/closed manually; it is now
  honored and managed with a `with` block.
  """
  with open(filename) as f:
    edges = json.load(f)

  # Copy out only the edges we're interested in from the full edge list.
  target_edges = {}
  to_visit = list(targets)
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])
  return target_edges
def WriteGraph(edges):
  """Print a graphviz graph to stdout.
  |edges| is a map of target to a list of other targets it depends on.

  print is used in single-argument call form so the identical output is
  produced under Python 2 (where the parens are plain grouping) while also
  being valid Python 3.
  """
  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src, dst in edges.items():
    build_file, target_name, toolset = ParseTarget(src)
    files[build_file].append(src)

  print('digraph D {')
  print('  fontsize=8')  # Used by subgraphs.
  print('  node [fontsize=8]')

  # Output nodes by file.  We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      build_file, target_name, toolset = ParseTarget(target)
      print('  "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                     target_name))
    else:
      # Group multiple nodes together in a subgraph.
      print('  subgraph "cluster_%s" {' % filename)
      print('    label = "%s"' % filename)
      for target in targets:
        build_file, target_name, toolset = ParseTarget(target)
        print('    "%s" [label="%s"]' % (target, target_name))
      print('  }')

  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print('  "%s" -> "%s"' % (src, dst))

  print('}')
def main():
  """Entry point: read dump.json and emit a graphviz graph for the targets
  named on the command line. Returns a process exit code."""
  if len(sys.argv) < 2:
    # sys.stderr.write replaces the Python 2-only `print >>sys.stderr`
    # statement so this runs unchanged under both Python 2 and 3.
    sys.stderr.write(__doc__ + '\n')
    sys.stderr.write('\n')
    sys.stderr.write('usage: %s target1 target2...\n' % (sys.argv[0]))
    return 1

  edges = LoadEdges('dump.json', sys.argv[1:])

  WriteGraph(edges)
  return 0


if __name__ == '__main__':
  sys.exit(main())
| mit |
def getdefaultlocale():
    # Brython stub: report the browser's language as the default locale.
    # NOTE(review): assumes the __BRYTHON__ JS bridge object is in scope at
    # runtime -- confirm; no encoding information is available, hence None.
    return __BRYTHON__.language,None
def localeconv():
    """ localeconv() -> dict.

        Returns numeric and monetary locale-specific parameters.
    """
    # 'C' locale default values. 127 (CHAR_MAX) means "not available".
    # The duplicate 'decimal_point'/'negative_sign'/'positive_sign'/
    # 'p_sep_by_space' entries of the original dict literal were removed;
    # later duplicates silently overwrite identical earlier ones, so the
    # returned mapping is unchanged.
    return {'grouping': [127],
            'currency_symbol': '',
            'n_sign_posn': 127,
            'p_cs_precedes': 127,
            'n_cs_precedes': 127,
            'mon_grouping': [],
            'n_sep_by_space': 127,
            'decimal_point': '.',
            'negative_sign': '',
            'positive_sign': '',
            'p_sep_by_space': 127,
            'int_curr_symbol': '',
            'p_sign_posn': 127,
            'thousands_sep': '',
            'mon_thousands_sep': '',
            'frac_digits': 127,
            'mon_decimal_point': '',
            'int_frac_digits': 127}
def setlocale(category, value=None):
    """ setlocale(integer,string=None) -> string.

        Activates/queries locale processing.
    """
    # Only the minimal 'C' locale is emulated in the browser.
    if value in (None, '', 'C'):
        return 'C'
    raise Error('_locale emulation only supports "C" locale')
# Largest signed-char value; locale uses it as a "value not available"
# sentinel (mirrors CPython's CHAR_MAX).
CHAR_MAX = 127
# Locale category constants, matching the values of CPython's _locale module.
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
# locale.Error is an alias of ValueError in this emulation.
Error = ValueError
def getlocale(category=LC_CTYPE):
    """ Returns the current setting for the given locale category as
        tuple (language code, encoding).

        category may be one of the LC_* value except LC_ALL. It
        defaults to LC_CTYPE.

        Except for the code 'C', the language code corresponds to RFC
        1766. code and encoding can be None in case the values cannot
        be determined.
    """
    # In the browser environment neither value can be determined.
    return (None, None)
| gpl-2.0 |
AICP/external_chromium_org | chrome/common/extensions/docs/server2/api_list_data_source.py | 7 | 4151 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from data_source import DataSource
from future import Future
from operator import itemgetter
from docs_server_utils import MarkLast, StringIdentity
class APIListDataSource(DataSource):
  """ This class creates a list of chrome.* APIs and chrome.experimental.* APIs
  for extensions and apps that are used in the api_index.html,
  experimental.html, and private_apis.html pages.

  An API is considered listable if it is listed in _api_features.json,
  it has a corresponding HTML file in the public template path, and one of
  the following conditions is met:
    - It has no "dependencies" or "extension_types" properties in _api_features
    - It has an "extension_types" property in _api_features with either/both
      "extension"/"platform_app" values present.
    - It has a dependency in _{api,manifest,permission}_features with an
      "extension_types" property where either/both "extension"/"platform_app"
      values are present.
  """
  def __init__(self, server_instance, _):
    self._features_bundle = server_instance.features_bundle
    self._api_models = server_instance.api_models
    self._object_store = server_instance.object_store_creator.Create(
        # Update the model when the API or Features model updates.
        APIListDataSource,
        category=StringIdentity(self._features_bundle.GetIdentity(),
                                self._api_models.GetIdentity()))
    self._api_categorizer = server_instance.api_categorizer
    self._availability_finder = server_instance.availability_finder

  def _GenerateAPIDict(self):
    # Builds, for each platform ('apps'/'extensions'), a dict with the
    # stable/beta/dev/trunk channel buckets plus 'all', 'private' and
    # 'experimental' lists, each sorted by API name.
    def get_channel_info(api_name):
      # Availability (channel + version) as computed by the availability finder.
      return self._availability_finder.GetAPIAvailability(api_name).channel_info

    def get_api_platform(api_name):
      feature = self._features_bundle.GetAPIFeatures().Get()[api_name]
      return feature['platforms']

    def make_dict_for_platform(platform):
      platform_dict = {
        'chrome': {'stable': [], 'beta': [], 'dev': [], 'trunk': []},
      }
      private_apis = []
      experimental_apis = []
      all_apis = []
      for api_name, api_model in self._api_models.IterModels():
        # Skip APIs without documentation on this platform.
        if not self._api_categorizer.IsDocumented(platform, api_name):
          continue
        api = {
          'name': api_name,
          'description': api_model.description,
          'platforms': get_api_platform(api_name),
        }
        category = self._api_categorizer.GetCategory(platform, api_name)
        if category == 'chrome':
          channel_info = get_channel_info(api_name)
          channel = channel_info.channel
          if channel == 'stable':
            # Only stable APIs carry the Chrome version they landed in.
            version = channel_info.version
            api['version'] = version
          platform_dict[category][channel].append(api)
          all_apis.append(api)
        elif category == 'experimental':
          experimental_apis.append(api)
          all_apis.append(api)
        elif category == 'private':
          # Private APIs are listed separately and excluded from 'all'.
          private_apis.append(api)

      # Sort each channel bucket by name and mark the final entry so
      # templates can special-case it (e.g. trailing separators).
      for channel, apis_by_channel in platform_dict['chrome'].iteritems():
        apis_by_channel.sort(key=itemgetter('name'))
        MarkLast(apis_by_channel)
        platform_dict['chrome'][channel] = apis_by_channel

      for key, apis in (('all', all_apis),
                        ('private', private_apis),
                        ('experimental', experimental_apis)):
        apis.sort(key=itemgetter('name'))
        MarkLast(apis)
        platform_dict[key] = apis

      return platform_dict
    return {
      'apps': make_dict_for_platform('apps'),
      'extensions': make_dict_for_platform('extensions'),
    }

  def _GetCachedAPIData(self):
    # Returns a Future that resolves the cached dict, generating and storing
    # it on a cache miss.
    data_future = self._object_store.Get('api_data')
    def resolve():
      data = data_future.Get()
      if data is None:
        data = self._GenerateAPIDict()
        self._object_store.Set('api_data', data)
      return data
    return Future(callback=resolve)

  def get(self, key):
    # DataSource interface: look up 'apps' or 'extensions'.
    return self._GetCachedAPIData().Get().get(key)

  def Cron(self):
    # Periodic refresh hook: warm the cache.
    return self._GetCachedAPIData()
| bsd-3-clause |
dsajkl/reqiop | common/lib/capa/capa/checker.py | 123 | 5899 | #!/usr/bin/env python
"""
Commandline tool for doing operations on Problems
"""
from __future__ import unicode_literals
import argparse
import logging
import sys
from path import path
from cStringIO import StringIO
from calc import UndefinedVariable
from capa.capa_problem import LoncapaProblem
from mako.lookup import TemplateLookup
# Configure a simple "LEVEL message" console format for the whole script and
# grab this tool's logger.
logging.basicConfig(format="%(levelname)s %(message)s")
log = logging.getLogger('capa.checker')
class DemoSystem(object):
    """Bare-bones stand-in for the LMS 'system' object that LoncapaProblem
    expects: provides Mako template rendering and a DEBUG flag."""

    def __init__(self):
        self.lookup = TemplateLookup(directories=[path(__file__).dirname() / 'templates'])
        self.DEBUG = True

    def render_template(self, template_filename, dictionary, context=None):
        """Render template_filename with dictionary, overlaid by optional context."""
        if context is None:
            context = {}
        merged = dict(dictionary)
        merged.update(context)
        return self.lookup.get_template(template_filename).render(**merged)
def main():
    """Parse command-line arguments and run the chosen command on each file."""
    parser = argparse.ArgumentParser(description='Check Problem Files')
    parser.add_argument("command", choices=['test', 'show'])  # Watch? Render? Open?
    parser.add_argument("files", nargs="+", type=argparse.FileType('r'))
    parser.add_argument("--seed", required=False, type=int)
    parser.add_argument("--log-level", required=False, default="INFO",
                        choices=['info', 'debug', 'warn', 'error',
                                 'INFO', 'DEBUG', 'WARN', 'ERROR'])

    args = parser.parse_args()
    log.setLevel(args.log_level.upper())

    system = DemoSystem()

    for problem_file in args.files:
        log.info("Opening {0}".format(problem_file.name))

        try:
            problem = LoncapaProblem(problem_file, "fakeid", seed=args.seed, system=system)
        except Exception as ex:
            # A parse failure on one file should not stop the rest.
            log.error("Could not parse file {0}".format(problem_file.name))
            log.exception(ex)
            continue

        if args.command == 'test':
            command_test(problem)
        elif args.command == 'show':
            command_show(problem)

        problem_file.close()

    # In case we want to do anything else here.
def command_show(problem):
    """Display the text for this problem"""
    # Call form of print: identical output under Python 2 (grouping parens
    # around a single argument) and valid under Python 3.
    print(problem.get_html())
def command_test(problem):
    """Run sanity checks on a problem: suggested answers pass, blanks fail."""
    # We're going to trap stdout/stderr from the problems (yes, some print)
    old_stdout, old_stderr = sys.stdout, sys.stderr
    try:
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        check_that_suggested_answers_work(problem)
        check_that_blanks_fail(problem)
        log_captured_output(sys.stdout,
                            "captured stdout from {0}".format(problem))
        log_captured_output(sys.stderr,
                            "captured stderr from {0}".format(problem))
    except Exception as e:
        log.exception(e)
    finally:
        # Always restore the real streams, even if a check blew up.
        sys.stdout, sys.stderr = old_stdout, old_stderr
def check_that_blanks_fail(problem):
    """Leaving it blank should never work. Neither should a space."""
    # Submit an empty string for every answer id the problem exposes.
    blank_answers = dict((answer_id, u"")
                         for answer_id in problem.get_question_answers())
    grading_results = problem.grade_answers(blank_answers)
    try:
        assert(all(result == 'incorrect' for result in grading_results.values()))
    except AssertionError:
        # Report exactly which answer ids accepted the blank submission.
        log.error("Blank accepted as correct answer in {0} for {1}"
                  .format(problem,
                          [answer_id for answer_id, result
                           in sorted(grading_results.items())
                           if result != 'incorrect']))
def check_that_suggested_answers_work(problem):
    """Split this up so that we're only used for formula/numeric answers.

    Examples of where this fails:

    * Displayed answers use units but acceptable ones do not.
      - L1e0.xml
      - Presents itself as UndefinedVariable (when it tries to pass to calc)

    * "a or d" is what's displayed, but only "a" or "d" is accepted, not the
      string "a or d".
      - L1-e00.xml
    """
    # These are actual answers we get from the responsetypes
    real_answers = problem.get_question_answers()

    # all_answers is real_answers + blanks for other answer_ids for which the
    # responsetypes can't provide us pre-canned answers (customresponse)
    all_answer_ids = problem.get_answer_ids()
    all_answers = dict((answer_id, real_answers.get(answer_id, ""))
                       for answer_id in all_answer_ids)

    log.debug("Real answers: {0}".format(real_answers))
    if real_answers:
        try:
            # Grade everything, then keep only the ids we had real answers for.
            real_results = dict((answer_id, result) for answer_id, result
                                in problem.grade_answers(all_answers).items()
                                if answer_id in real_answers)
            log.debug(real_results)
            assert(all(result == 'correct'
                       for answer_id, result in real_results.items()))
        except UndefinedVariable as uv_exc:
            # See docstring: typically displayed answers carrying units.
            log.error("The variable \"{0}\" specified in the ".format(uv_exc) +
                      "solution isn't recognized (is it a units measure?).")
        except AssertionError:
            log.error("The following generated answers were not accepted for {0}:"
                      .format(problem))
            for question_id, result in sorted(real_results.items()):
                if result != 'correct':
                    log.error(" {0} = {1}".format(question_id, real_answers[question_id]))
        except Exception as ex:
            log.error("Uncaught error in {0}".format(problem))
            log.exception(ex)
def log_captured_output(output_stream, stream_name):
    """Rewind output_stream and, if it holds any text, log it wrapped in
    Begin/End marker lines."""
    output_stream.seek(0)
    captured = output_stream.read()
    if not captured:
        return
    log.info("##### Begin {0} #####\n".format(stream_name) + captured)
    log.info("##### End {0} #####".format(stream_name))
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| agpl-3.0 |
Nitrate/Nitrate | src/tcms/core/files.py | 2 | 8640 | # -*- coding: utf-8 -*-
import hashlib
import os
import logging
import time
import urllib.parse
from http import HTTPStatus
from django.conf import settings
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.encoding import smart_str
from django.views import generic
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_GET, require_POST
from tcms.core.views import prompt
from tcms.testcases.models import TestCase, TestCaseAttachment
from tcms.testplans.models import TestPlan, TestPlanAttachment
from tcms.management.models import TestAttachment, TestAttachmentData
log = logging.getLogger(__name__)
def calculate_checksum(uploaded_file: UploadedFile) -> str:
    """Return the hex MD5 digest of an uploaded file's content.

    Reads the file via chunks() so large uploads are never held in memory
    at once.
    """
    digest = hashlib.md5()
    for piece in uploaded_file.chunks():
        digest.update(piece)
    return digest.hexdigest()
class UploadFileView(PermissionRequiredMixin, generic.View):
    """Upload a file and attach it to a test plan or a test case.

    Deduplicates by MD5 checksum: a file whose content was uploaded before
    is rejected with an alert rather than stored twice.
    """

    permission_required = "management.add_testattachment"

    @method_decorator(csrf_protect)
    def post(self, request):
        # The attachment must be bound to exactly one of: a plan or a case.
        to_plan_id = request.POST.get("to_plan_id")
        to_case_id = request.POST.get("to_case_id")
        if to_plan_id is None and to_case_id is None:
            return prompt.alert(
                request,
                "Uploading file works with plan or case. Nitrate cannot "
                "proceed without plan or case ID.",
            )

        # Choose redirect target and relation factory for plan vs. case.
        # Plan takes precedence when both ids are supplied.
        if to_plan_id is not None:
            redirect_url = reverse("plan-attachment", args=[to_plan_id])
            create_rel = TestPlanAttachment.objects.create
            rel_kwargs = {"plan_id": int(to_plan_id)}
        else:
            redirect_url = reverse("case-attachment", args=[to_case_id])
            create_rel = TestCaseAttachment.objects.create
            rel_kwargs = {"case_id": int(to_case_id)}

        # No file submitted: silently bounce back to the attachment page.
        upload_file = request.FILES.get("upload_file")
        if not upload_file:
            return HttpResponseRedirect(redirect_url)

        # NOTE(review): re-fetches the same object as above; the annotated
        # rebind appears to exist only for typing purposes.
        upload_file: UploadedFile = request.FILES["upload_file"]

        # Enforce the configured size cap before touching the filesystem.
        if upload_file.size > settings.MAX_UPLOAD_SIZE:
            return prompt.alert(
                request,
                f"You upload entity is too large. Please ensure the file "
                f"is less than {settings.MAX_UPLOAD_SIZE} bytes.",
            )

        uploaded_filename = upload_file.name
        # Reject names that cannot be encoded (stored names embed the filename).
        try:
            uploaded_filename.encode()
        except UnicodeEncodeError:
            return prompt.alert(request, "Upload File name is not legal.")

        # Create the upload directory when it's not exist
        if not os.path.exists(settings.FILE_UPLOAD_DIR):
            os.mkdir(settings.FILE_UPLOAD_DIR)

        # Content-based dedup: same checksum means same file content, even
        # if it was uploaded under a different name.
        checksum = calculate_checksum(upload_file)
        attachment = TestAttachment.objects.filter(checksum=checksum).first()
        if attachment is not None:
            if attachment.file_name == uploaded_filename:
                return prompt.alert(request, f"File {uploaded_filename} has been uploaded already.")
            else:
                return prompt.alert(
                    request,
                    f"A file {attachment.file_name} having same content has "
                    f"been uploaded previously.",
                )

        # Stored name is made unique by embedding uploader and timestamp.
        stored_name = "{}-{}-{}".format(request.user.username, time.time(), uploaded_filename)

        attachment = TestAttachment(
            submitter_id=request.user.id,
            description=request.POST.get("description", None),
            file_name=uploaded_filename,
            stored_name=stored_name,
            mime_type=upload_file.content_type,
            checksum=checksum,
        )

        # Write content to disk first; only then persist the metadata row and
        # the plan/case relation.
        with open(attachment.stored_filename, "wb+") as f:
            for chunk in upload_file.chunks():
                f.write(chunk)

        attachment.save()
        rel_kwargs["attachment"] = attachment
        create_rel(**rel_kwargs)

        return HttpResponseRedirect(redirect_url)
@require_GET
def check_file(request, file_id):
    """Serve an attachment's content as a file download.

    Content is taken from ``TestAttachmentData`` when a row exists,
    otherwise it is read from ``settings.FILE_UPLOAD_DIR`` on disk.
    """
    # 404 early if the attachment record itself does not exist.
    attachment = get_object_or_404(TestAttachment, pk=file_id)
    attachment_data = TestAttachmentData.objects.filter(attachment__attachment_id=file_id).first()
    # First try to read file content from database.
    if attachment_data:
        # File content is not written into TestAttachmentData in upload_file,
        # this code path is dead now. Think about if file content should be
        # written into database in the future.
        contents = attachment_data.contents
    else:
        # File was not written into database, read it from configured file
        # system.
        # stored_name is preferred; file_name is a legacy fallback for rows
        # created before stored_name existed.  Backslashes are normalized so
        # the path works when the upload dir was configured Windows-style.
        stored_file_name = os.path.join(
            settings.FILE_UPLOAD_DIR,
            urllib.parse.unquote(attachment.stored_name or attachment.file_name),
        ).replace("\\", "/")
        if not os.path.exists(stored_file_name):
            raise Http404(f"Attachment file {stored_file_name} does not exist.")
        try:
            with open(stored_file_name, "rb") as f:
                contents = f.read()
        except IOError:
            msg = "Cannot read attachment file from server."
            log.exception(msg)
            return prompt.alert(request, msg)
    response = HttpResponse(contents, content_type=str(attachment.mime_type))
    # smart_str guards against non-str (e.g. lazy/bytes) file names in the
    # header value.
    file_name = smart_str(attachment.file_name)
    response["Content-Disposition"] = f'attachment; filename="{file_name}"'
    return response
def able_to_delete_attachment(request, file_id: int) -> bool:
    """Check whether the requesting user may delete the given attachment.

    Deletion is allowed for:
      1. a superuser,
      2. the attachment's submitter,
      3. the plan's author or owner (when ``from_plan`` is posted),
      4. the case's author (when ``from_case`` is posted).
    """
    requester = request.user
    if requester.is_superuser:
        return True

    attachment = TestAttachment.objects.get(attachment_id=file_id)
    if attachment.submitter_id == requester.pk:
        return True

    post = request.POST
    if "from_plan" in post:
        plan = TestPlan.objects.get(plan_id=int(post["from_plan"]))
        return requester.pk in (plan.author_id, plan.owner_id)
    if "from_case" in post:
        case = TestCase.objects.get(case_id=int(post["from_case"]))
        return requester.pk == case.author_id

    # Neither a plan nor a case context was given: deny by default.
    return False
# Delete Attachment


def _remove_attachment_rel(rel_model, rel_filter, file_id, source_kind, source_id):
    """Delete one attachment relation plus the attachment itself.

    Args:
        rel_model: relation model class (TestPlanAttachment or
            TestCaseAttachment).
        rel_filter: kwargs identifying the single relation row.
        file_id: attachment id, used only for the error message.
        source_kind: "plan" or "case", used in user-facing messages.
        source_id: id of the plan/case the attachment is removed from.

    Returns:
        JsonResponse describing success, or a BAD_REQUEST response when the
        attachment does not belong to the given plan/case.
    """
    try:
        rel = rel_model.objects.get(**rel_filter)
    except rel_model.DoesNotExist:
        return JsonResponse(
            {"message": f"Attachment {file_id} does not belong to {source_kind} {source_id}."},
            status=HTTPStatus.BAD_REQUEST,
        )
    rel.delete()
    attachment = rel.attachment
    msg = f"Attachment {attachment.file_name} is removed from {source_kind} {source_id} successfully."
    # Delete the DB row first, then the file on disk.  The in-memory model
    # instance still exposes stored_filename after delete().
    # NOTE(review): if the file is already missing, os.unlink raises and the
    # request fails even though the DB cleanup succeeded — confirm whether
    # that is intended before changing it.
    attachment.delete()
    os.unlink(attachment.stored_filename)
    return JsonResponse({"message": msg})


@require_POST
def delete_file(request):
    """Remove an attachment from a plan or a case (POST: file_id + from_plan/from_case)."""
    file_id = int(request.POST["file_id"])
    if not able_to_delete_attachment(request, file_id):
        return JsonResponse(
            {
                "message": f"User {request.user.username} is not allowed to "
                f"delete the attachment."
            },
            status=HTTPStatus.UNAUTHORIZED,
        )
    # Delete plan's attachment
    if "from_plan" in request.POST:
        plan_id = int(request.POST["from_plan"])
        return _remove_attachment_rel(
            TestPlanAttachment,
            {"attachment": file_id, "plan_id": plan_id},
            file_id,
            "plan",
            plan_id,
        )
    # Delete cases' attachment
    elif "from_case" in request.POST:
        case_id = int(request.POST["from_case"])
        return _remove_attachment_rel(
            TestCaseAttachment,
            {"attachment": file_id, "case_id": case_id},
            file_id,
            "case",
            case_id,
        )
    else:
        return JsonResponse(
            {"message": "Unknown from where to remove the attachment."},
            status=HTTPStatus.BAD_REQUEST,
        )
| gpl-2.0 |
UnbDroid/robomagellan | Codigos/Raspberry/desenvolvimentoRos/build/common_msgs/geometry_msgs/cmake/geometry_msgs-genmsg-context.py | 1 | 3114 | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Accel.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/AccelStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/AccelWithCovariance.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/AccelWithCovarianceStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Inertia.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/InertiaStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Point.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Point32.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/PointStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Polygon.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/PolygonStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Pose2D.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Pose.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/PoseArray.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/PoseStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/PoseWithCovariance.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/PoseWithCovarianceStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Quaternion.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/QuaternionStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Transform.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/TransformStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Twist.msg;/home/pi/Documents/desenvolvimen
toRos/src/common_msgs/geometry_msgs/msg/TwistStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/TwistWithCovariance.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/TwistWithCovarianceStamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Vector3.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Vector3Stamped.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/Wrench.msg;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg/WrenchStamped.msg"
# This package defines no .srv files.
services_str = ""
pkg_name = "geometry_msgs"
# Message packages this one depends on (types referenced from its .msg files).
dependencies_str = "std_msgs"
# Code generators to run for this package.
langs = "gencpp;genlisp;genpy"
# Alternating package-name;msg-dir pairs used to resolve message includes.
dep_include_paths_str = "geometry_msgs;/home/pi/Documents/desenvolvimentoRos/src/common_msgs/geometry_msgs/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
# CMake substituted '' here, so this evaluates to False for this package.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| gpl-3.0 |
cloudtools/troposphere | troposphere/events.py | 1 | 5258 | # Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.0.0
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class ApiDestination(AWSObject):
    """`AWS::Events::ApiDestination` CloudFormation resource."""

    resource_type = "AWS::Events::ApiDestination"

    # Property name -> (expected type, required) per the resource spec.
    props = {
        "ConnectionArn": (str, True),
        "Description": (str, False),
        "HttpMethod": (str, True),
        "InvocationEndpoint": (str, True),
        "InvocationRateLimitPerSecond": (integer, False),
        "Name": (str, False),
    }
class Archive(AWSObject):
    """`AWS::Events::Archive` CloudFormation resource."""

    resource_type = "AWS::Events::Archive"

    # Property name -> (expected type, required) per the resource spec.
    props = {
        "ArchiveName": (str, False),
        "Description": (str, False),
        "EventPattern": (dict, False),
        "RetentionDays": (integer, False),
        "SourceArn": (str, True),
    }
class Connection(AWSObject):
    """`AWS::Events::Connection` CloudFormation resource."""

    resource_type = "AWS::Events::Connection"

    props = {
        "AuthParameters": (dict, True),
        "AuthorizationType": (str, True),
        "Description": (str, False),
        "Name": (str, False),
    }
class EventBus(AWSObject):
    """`AWS::Events::EventBus` CloudFormation resource."""

    resource_type = "AWS::Events::EventBus"

    props = {
        "EventSourceName": (str, False),
        "Name": (str, True),
    }
class Condition(AWSProperty):
    """`Condition` property type (used by EventBusPolicy)."""

    props = {
        "Key": (str, False),
        "Type": (str, False),
        "Value": (str, False),
    }
class EventBusPolicy(AWSObject):
    """`AWS::Events::EventBusPolicy` CloudFormation resource."""

    resource_type = "AWS::Events::EventBusPolicy"

    props = {
        "Action": (str, False),
        "Condition": (Condition, False),
        "EventBusName": (str, False),
        "Principal": (str, False),
        "Statement": (dict, False),
        "StatementId": (str, True),
    }
class BatchArrayProperties(AWSProperty):
    """`ArrayProperties` property type (used by BatchParameters)."""

    props = {
        "Size": (integer, False),
    }
class BatchRetryStrategy(AWSProperty):
    """`RetryStrategy` property type (used by BatchParameters)."""

    props = {
        "Attempts": (integer, False),
    }
class BatchParameters(AWSProperty):
    """`BatchParameters` property type (used by Target)."""

    props = {
        "ArrayProperties": (BatchArrayProperties, False),
        "JobDefinition": (str, True),
        "JobName": (str, True),
        "RetryStrategy": (BatchRetryStrategy, False),
    }
class DeadLetterConfig(AWSProperty):
    """`DeadLetterConfig` property type (used by Target)."""

    props = {
        "Arn": (str, False),
    }
class AwsVpcConfiguration(AWSProperty):
    """`AwsVpcConfiguration` property type (used by NetworkConfiguration)."""

    props = {
        "AssignPublicIp": (str, False),
        "SecurityGroups": ([str], False),
        "Subnets": ([str], True),
    }
class NetworkConfiguration(AWSProperty):
    """`NetworkConfiguration` property type (used by EcsParameters)."""

    props = {
        "AwsVpcConfiguration": (AwsVpcConfiguration, False),
    }
class EcsParameters(AWSProperty):
    """`EcsParameters` property type (used by Target)."""

    props = {
        "Group": (str, False),
        "LaunchType": (str, False),
        "NetworkConfiguration": (NetworkConfiguration, False),
        "PlatformVersion": (str, False),
        "TaskCount": (integer, False),
        "TaskDefinitionArn": (str, True),
    }
class HttpParameters(AWSProperty):
    """`HttpParameters` property type (used by Target)."""

    props = {
        "HeaderParameters": (dict, False),
        "PathParameterValues": ([str], False),
        "QueryStringParameters": (dict, False),
    }
class InputTransformer(AWSProperty):
    """`InputTransformer` property type (used by Target)."""

    props = {
        "InputPathsMap": (dict, False),
        "InputTemplate": (str, True),
    }
class KinesisParameters(AWSProperty):
    """`KinesisParameters` property type (used by Target)."""

    props = {
        "PartitionKeyPath": (str, True),
    }
class RedshiftDataParameters(AWSProperty):
    """`RedshiftDataParameters` property type (used by Target)."""

    props = {
        "Database": (str, True),
        "DbUser": (str, False),
        "SecretManagerArn": (str, False),
        "Sql": (str, True),
        "StatementName": (str, False),
        "WithEvent": (boolean, False),
    }
class RetryPolicy(AWSProperty):
    """`RetryPolicy` property type (used by Target)."""

    props = {
        "MaximumEventAgeInSeconds": (integer, False),
        "MaximumRetryAttempts": (integer, False),
    }
class RunCommandTarget(AWSProperty):
    """`RunCommandTarget` property type (used by RunCommandParameters)."""

    props = {
        "Key": (str, True),
        "Values": ([str], True),
    }
class RunCommandParameters(AWSProperty):
    """`RunCommandParameters` property type (used by Target)."""

    props = {
        "RunCommandTargets": ([RunCommandTarget], True),
    }
class SqsParameters(AWSProperty):
    """`SqsParameters` property type (used by Target)."""

    props = {
        "MessageGroupId": (str, True),
    }
class Target(AWSProperty):
    """`Target` property type (used by Rule)."""

    # Property name -> (expected type, required) per the resource spec.
    props = {
        "Arn": (str, True),
        "BatchParameters": (BatchParameters, False),
        "DeadLetterConfig": (DeadLetterConfig, False),
        "EcsParameters": (EcsParameters, False),
        "HttpParameters": (HttpParameters, False),
        "Id": (str, True),
        "Input": (str, False),
        "InputPath": (str, False),
        "InputTransformer": (InputTransformer, False),
        "KinesisParameters": (KinesisParameters, False),
        "RedshiftDataParameters": (RedshiftDataParameters, False),
        "RetryPolicy": (RetryPolicy, False),
        "RoleArn": (str, False),
        "RunCommandParameters": (RunCommandParameters, False),
        "SqsParameters": (SqsParameters, False),
    }
class Rule(AWSObject):
    """`AWS::Events::Rule` CloudFormation resource."""

    resource_type = "AWS::Events::Rule"

    props = {
        "Description": (str, False),
        "EventBusName": (str, False),
        "EventPattern": (dict, False),
        "Name": (str, False),
        "RoleArn": (str, False),
        "ScheduleExpression": (str, False),
        "State": (str, False),
        "Targets": ([Target], False),
    }
| bsd-2-clause |
shakalaca/ASUS_PadFone_PF500KL | kernel/tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

# Optional filter: only count syscalls made by this command name.
for_comm = None

# Refresh period (seconds) for the totals display.
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    # Two args: [comm] then [interval].
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # One arg: an integer is treated as [interval], anything else as [comm].
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id event counters, shared with the display thread.
syscalls = autodict()
def trace_begin():
    """Perf trace hook, called once before event processing starts.

    Spawns the periodic display loop on a background thread so that
    event handling is not blocked while totals are printed every
    `interval` seconds.
    """
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Perf trace hook: tally one raw sys_enter event under its syscall id."""
    # Honor the optional command-name filter.
    if for_comm is not None and common_comm != for_comm:
        return
    try:
        syscalls[id] += 1
    except TypeError:
        # First event for this id: the autodict slot is not a number yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display loop (Python 2): every `interval` seconds, clear the terminal,
    # print the per-syscall event counts sorted by count (descending), then
    # reset the counters so each refresh shows per-interval totals.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # Skip ids that cannot be resolved/formatted.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
3dfxsoftware/cbss-addons | psm/model/stock.py | 1 | 4580 | # -*- encoding: utf-8 -*-
############################################################################
# Module Writen to OpenERP, Open Source Management Solution #
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>). #
# All Rights Reserved #
# Credits######################################################
# Coded by: Miguel Delgado <miguel@openerp.com.ve> #
# Planified by: Nhomar Hernandez #
# Finance by: Corporacion AMD #
# Audited by: Humberto Arocha humberto@openerp.com.ve #
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import decimal_precision as dp
class stock_production_lot(osv.Model):
    # Extension of stock.production.lot adding a serial-check flag, an
    # internal reference field, and a constraint tying the flag to stock.

    def _serial_identification(self, cr, uid, ids, context=None):
        """Constraint helper: a serialized lot must have zero stock available.

        Returns False (constraint violated) as soon as one browsed lot has
        check_serial set while its stock_available is non-zero.
        """
        if context is None:
            context = {}
        spl_brw = self.browse(cr, uid, ids, context=context)
        for spl in spl_brw:
            if spl.check_serial and spl.stock_available != 0:
                return False
        return True

    _inherit = 'stock.production.lot'

    _columns = {
        # Flag marking this lot as subject to the serial constraint above.
        'check_serial': fields.boolean('Check Serial'),
        'ref': fields.char('Internal Reference', size=256,
                           help="""Internal reference number in case it
                                differs from the manufacturer's
                                serial number"""),
    }

    _constraints = [
        (_serial_identification, _('Check this picking problem with serial'),
         ['Check Serial (check_serial)', 'Stock Available (stock_available)']),
    ]

    def name_get(self, cr, uid, ids, context=None):
        """Display only the first whitespace-separated token of each lot name."""
        if context is None:
            context = {}
        ret = []
        res = super(stock_production_lot, self).name_get(
            cr, uid, ids, context=context)
        for i in res:
            # i is an (id, name) pair; keep the id, truncate the name at the
            # first space.
            ret.append((i[0], i[1].split(' ')[0]))
        return ret
class stock_picking(osv.Model):
    _inherit = "stock.picking"

    def test_serial(self, cr, uid, ids):
        """Validate serial tracking on every move of the given pickings.

        Raises except_osv when:
          * an incoming ('in') move of a serial-tracked-incoming product has
            no production lot,
          * an outgoing ('out') move of a serial-tracked-outgoing product has
            no production lot,
          * an outgoing move of a fully tracked product references a lot name
            that does not exist for that product.

        Returns True when all moves pass.
        """
        ok = True
        spl_obj = self.pool.get('stock.production.lot')
        for pick in self.browse(cr, uid, ids):
            for move in pick.move_lines:
                if move.product_id.track_serial_incoming and not \
                        move.prodlot_id and pick.type == 'in':
                    raise osv.except_osv(_('Error !'), _(
                        'This product %s should be serialized') %
                        move.product_id.name)
                if move.product_id.track_serial_outgoing and not \
                        move.prodlot_id and pick.type == 'out':
                    raise osv.except_osv(_('Error !'), _(
                        'This product %s should be serialized') %
                        move.product_id.name)
                if move.product_id.track_serial_incoming and \
                        move.product_id.track_serial_outgoing and\
                        pick.type == 'out':
                    # Look up the lot by (product, name) to verify it exists.
                    spl_ids = spl_obj.search(cr, uid, [(
                        'product_id', '=', move.product_id.id),
                        ('name', '=', move.prodlot_id.name)])
                    if len(spl_ids) < 1:
                        # NOTE(review): message grammar ("is not exist") is a
                        # translatable string; fixing it would invalidate
                        # existing translations.
                        raise osv.except_osv(_('Error !'), _(
                            'This serial %s is not exist') %
                            move.prodlot_id.name)
        return ok
| gpl-2.0 |
rushiagr/keystone | keystone/tests/unit/test_backend_federation_sql.py | 3 | 1784 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import sql
from keystone.tests.unit import test_backend_sql
class SqlFederation(test_backend_sql.SqlModels):
    """Set of tests for checking SQL Federation.

    Each test asserts the expected column names, SQL types, and lengths of
    one federation table via the inherited assertExpectedSchema helper.
    """

    def test_identity_provider(self):
        """identity_provider table schema."""
        cols = (('id', sql.String, 64),
                ('remote_id', sql.String, 256),
                ('enabled', sql.Boolean, None),
                ('description', sql.Text, None))
        self.assertExpectedSchema('identity_provider', cols)

    def test_federated_protocol(self):
        """federation_protocol table schema."""
        cols = (('id', sql.String, 64),
                ('idp_id', sql.String, 64),
                ('mapping_id', sql.String, 64))
        self.assertExpectedSchema('federation_protocol', cols)

    def test_mapping(self):
        """mapping table schema."""
        cols = (('id', sql.String, 64),
                ('rules', sql.JsonBlob, None))
        self.assertExpectedSchema('mapping', cols)

    def test_service_provider(self):
        """service_provider table schema."""
        cols = (('auth_url', sql.String, 256),
                ('id', sql.String, 64),
                ('enabled', sql.Boolean, None),
                ('description', sql.Text, None),
                ('sp_url', sql.String, 256))
        self.assertExpectedSchema('service_provider', cols)
| apache-2.0 |
yaroslavvb/tensorflow | tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py | 8 | 34146 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.ops.rnn_cell_impl import _RNNCell as RNNCell
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
_BIAS_VARIABLE_NAME = "biases"
_WEIGHTS_VARIABLE_NAME = "weights"
@contextlib.contextmanager
def _checked_scope(cell, scope, reuse=None, **kwargs):
  """Context manager guarding an RNN cell's variable scope against misuse.

  Opens `scope` (forwarding `reuse`/`kwargs` to variable_scope) and yields
  it, after verifying that:
    * a cell that was already used under one scope is not re-entered under
      a different scope, and
    * a fresh cell (no `_scope` yet) does not silently adopt a scope that
      already contains weights, unless `reuse` was passed explicitly.
  On success, the scope is recorded on `cell._scope` for future checks.
  """
  if reuse is not None:
    kwargs["reuse"] = reuse
  with vs.variable_scope(scope, **kwargs) as checking_scope:
    scope_name = checking_scope.name
    if hasattr(cell, "_scope"):
      cell_scope = cell._scope  # pylint: disable=protected-access
      if cell_scope.name != checking_scope.name:
        raise ValueError(
            "Attempt to reuse RNNCell %s with a different variable scope than "
            "its first use.  First use of cell was with scope '%s', this "
            "attempt is with scope '%s'.  Please create a new instance of the "
            "cell if you would like it to use a different set of weights.  "
            "If before you were using: MultiRNNCell([%s(...)] * num_layers), "
            "change to: MultiRNNCell([%s(...) for _ in range(num_layers)]).  "
            "If before you were using the same cell instance as both the "
            "forward and reverse cell of a bidirectional RNN, simply create "
            "two instances (one for forward, one for reverse).  "
            "In May 2017, we will start transitioning this cell's behavior "
            "to use existing stored weights, if any, when it is called "
            "with scope=None (which can lead to silent model degradation, so "
            "this error will remain until then.)"
            % (cell, cell_scope.name, scope_name, type(cell).__name__,
               type(cell).__name__))
    else:
      weights_found = False
      try:
        # Probe for pre-existing weights by attempting a reuse-mode lookup.
        with vs.variable_scope(checking_scope, reuse=True):
          vs.get_variable(_WEIGHTS_VARIABLE_NAME)
        weights_found = True
      except ValueError:
        # No existing weights variable in this scope: safe to proceed.
        pass
      if weights_found and reuse is None:
        raise ValueError(
            "Attempt to have a second RNNCell use the weights of a variable "
            "scope that already has weights: '%s'; and the cell was not "
            "constructed as %s(..., reuse=True).  "
            "To share the weights of an RNNCell, simply "
            "reuse it in your second calculation, or create a new one with "
            "the argument reuse=True." % (scope_name, type(cell).__name__))

    # Everything is OK.  Update the cell's scope and yield it.
    cell._scope = checking_scope  # pylint: disable=protected-access
    yield checking_scope
class BasicRNNCell(RNNCell):
  """The most basic RNN cell.

  Args:
    num_units: int, number of units in the cell (state and output size).
    input_size: Deprecated and unused.
    activation: Activation function applied to the linear combination.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope.
  """

  def __init__(self, num_units, input_size=None, activation=tanh, reuse=None):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
    self._reuse = reuse

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
    with _checked_scope(self, scope or "basic_rnn_cell", reuse=self._reuse):
      # Output doubles as the next state in a vanilla RNN.
      output = self._activation(
          _linear([inputs, state], self._num_units, True))
    return output, output
class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

  Args:
    num_units: int, number of units in the cell (state and output size).
    input_size: Deprecated and unused.
    activation: Activation function for the candidate state.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope.
  """

  def __init__(self, num_units, input_size=None, activation=tanh, reuse=None):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
    self._reuse = reuse

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with _checked_scope(self, scope or "gru_cell", reuse=self._reuse):
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        # Both gates are computed in one matmul and split afterwards.
        r, u = array_ops.split(
            value=_linear(
                [inputs, state], 2 * self._num_units, True, 1.0),
            num_or_size_splits=2,
            axis=1)
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("candidate"):
        # Candidate state uses the reset-gated previous state.
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))
      # Update gate interpolates between old state and candidate.
      new_h = u * state + (1 - u) * c
    return new_h, new_h
# Base namedtuple; subclassed below so a dtype property can be added.
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

  Stores two elements: `(c, h)`, in that order.

  Only used when `state_is_tuple=True`.
  """
  # No per-instance __dict__: keeps the tuple lightweight.
  __slots__ = ()

  @property
  def dtype(self):
    """Common dtype of (c, h); raises TypeError if they disagree."""
    (c, h) = self
    if not c.dtype == h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c.dtype), str(h.dtype)))
    return c.dtype
class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  For advanced models, please use the full LSTMCell that follows.
  """

  def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=True, activation=tanh, reuse=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    self._reuse = reuse

  @property
  def state_size(self):
    # Tuple state is (c, h); concatenated state packs both into one tensor.
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with _checked_scope(self, scope or "basic_lstm_cell", reuse=self._reuse):
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

      # Standard LSTM update; forget_bias shifts the forget gate pre-sigmoid.
      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat([new_c, new_h], 1)
      return new_h, new_state
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units, input_size=None,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=tanh, reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
input_size: Deprecated and unused.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
self._reuse = reuse
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
  """Run one step of LSTM.

  Args:
    inputs: input Tensor, 2D, batch x num_units.
    state: if `state_is_tuple` is False, this must be a state Tensor,
      `2-D, batch x state_size`.  If `state_is_tuple` is True, this must be a
      tuple of state Tensors, both `2-D`, with column sizes `c_state` and
      `m_state`.
    scope: VariableScope for the created subgraph; defaults to "lstm_cell".

  Returns:
    A tuple containing:

    - A `2-D, [batch x output_dim]`, Tensor representing the output of the
      LSTM after reading `inputs` when previous state was `state`.
      Here output_dim is:
         num_proj if num_proj was set,
         num_units otherwise.
    - Tensor(s) representing the new state of LSTM after reading `inputs` when
      the previous state was `state`.  Same type and shape(s) as `state`.

  Raises:
    ValueError: If input size cannot be inferred from inputs via
      static shape inference.
  """
  # With no projection layer, the "m" (hidden/output) part of the state
  # has the same width as the cell state.
  num_proj = self._num_units if self._num_proj is None else self._num_proj

  if self._state_is_tuple:
    (c_prev, m_prev) = state
  else:
    # Concatenated state layout: [c (num_units) | m (num_proj)].
    c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
    m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

  dtype = inputs.dtype
  # Static shape inference only: a dynamic last dimension is rejected.
  input_size = inputs.get_shape().with_rank(2)[1]
  if input_size.value is None:
    raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
  with _checked_scope(self, scope or "lstm_cell",
                      initializer=self._initializer,
                      reuse=self._reuse) as unit_scope:
    if self._num_unit_shards is not None:
      # Shard the big gate matrix across num_unit_shards variables.
      unit_scope.set_partitioner(
          partitioned_variables.fixed_size_partitioner(
              self._num_unit_shards))
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
    i, j, f, o = array_ops.split(
        value=lstm_matrix, num_or_size_splits=4, axis=1)
    # Diagonal connections
    if self._use_peepholes:
      # Peephole weights are per-unit vectors; created in a sub-scope with
      # the partitioner cleared so they are not sharded.
      with vs.variable_scope(unit_scope) as projection_scope:
        if self._num_unit_shards is not None:
          projection_scope.set_partitioner(None)
        w_f_diag = vs.get_variable(
            "w_f_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "w_i_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "w_o_diag", shape=[self._num_units], dtype=dtype)

    if self._use_peepholes:
      # Peepholes let the forget/input gates see the previous cell state.
      c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
           sigmoid(i + w_i_diag * c_prev) * self._activation(j))
    else:
      c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
           self._activation(j))

    if self._cell_clip is not None:
      # pylint: disable=invalid-unary-operand-type
      c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
      # pylint: enable=invalid-unary-operand-type
    if self._use_peepholes:
      # The output gate peeps at the *new* cell state c.
      m = sigmoid(o + w_o_diag * c) * self._activation(c)
    else:
      m = sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      # Recurrent projection: shrink m from num_units to num_proj.
      with vs.variable_scope("projection") as proj_scope:
        if self._num_proj_shards is not None:
          proj_scope.set_partitioner(
              partitioned_variables.fixed_size_partitioner(
                  self._num_proj_shards))
        m = _linear(m, self._num_proj, bias=False)

      if self._proj_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
        # pylint: enable=invalid-unary-operand-type

  new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
               array_ops.concat([c, m], 1))
  return m, new_state
class OutputProjectionWrapper(RNNCell):
  """Wraps an RNNCell and applies a linear projection to its output.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concatenated sequence, then split it
  if needed or directly feed into a softmax.
  """

  def __init__(self, cell, output_size, reuse=None):
    """Create a cell with output projection.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already
        has the given variables, an error is raised.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size
    self._reuse = reuse

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    cell_output, cell_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    scope_name = scope or "output_projection_wrapper"
    with _checked_scope(self, scope_name, reuse=self._reuse):
      projected = _linear(cell_output, self._output_size, True)
    return projected, cell_state
class InputProjectionWrapper(RNNCell):
  """Wraps an RNNCell, linearly projecting its inputs first.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """

  def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    # The deprecation warning is emitted before validating `cell`,
    # matching the original evaluation order.
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Project the inputs, then run the wrapped cell."""
    # Default scope: "InputProjectionWrapper"
    scope_name = scope or "input_projection_wrapper"
    with vs.variable_scope(scope_name):
      projected_inputs = _linear(inputs, self._num_proj, True)
    return self._cell(projected_inputs, state)
class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, dropout is added to its input and output.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    # Range checks only apply to Python floats; Tensor keep_probs are
    # validated by nn_ops.dropout at graph execution time.
    # Use %g (not %d) so fractional values appear verbatim in the message
    # instead of being truncated to an integer.
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %g"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %g"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    # A float keep_prob of exactly 1 means "no dropout": skip the op
    # entirely.  Tensor keep_probs always go through nn_ops.dropout.
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
class ResidualWrapper(RNNCell):
  """RNNCell wrapper that ensures cell inputs are added to the outputs."""

  def __init__(self, cell):
    """Constructs a `ResidualWrapper` for `cell`.

    Args:
      cell: An instance of `RNNCell`.
    """
    self._cell = cell

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and add its inputs to its outputs.

    Args:
      inputs: cell inputs.
      state: cell state.
      scope: optional cell scope.

    Returns:
      Tuple of cell outputs and new state.

    Raises:
      TypeError: If cell inputs and outputs have different structure (type).
      ValueError: If cell inputs and outputs have different structure (value).
    """
    cell_outputs, new_state = self._cell(inputs, state, scope=scope)
    nest.assert_same_structure(inputs, cell_outputs)

    def check_shape(inp, out):
      # Residual addition needs elementwise-compatible static shapes.
      inp.get_shape().assert_is_compatible_with(out.get_shape())

    nest.map_structure(check_shape, inputs, cell_outputs)

    def add_residual(inp, out):
      return inp + out

    res_outputs = nest.map_structure(add_residual, inputs, cell_outputs)
    return (res_outputs, new_state)
class DeviceWrapper(RNNCell):
  """Operator that ensures an RNNCell runs on a particular device."""

  def __init__(self, cell, device):
    """Construct a `DeviceWrapper` for `cell` with device `device`.

    Ensures the wrapped `cell` is called with `tf.device(device)`.

    Args:
      cell: An instance of `RNNCell`.
      device: A device string or function, for passing to `tf.device`.
    """
    self._cell = cell
    self._device = device

  @property
  def state_size(self):
    # Delegates straight to the wrapped cell.
    return self._cell.state_size

  @property
  def output_size(self):
    # Delegates straight to the wrapped cell.
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on specified device."""
    with ops.device(self._device):
      return self._cell(inputs, state, scope=scope)
class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self, cell, embedding_classes, embedding_size, initializer=None,
               reuse=None):
    """Create a cell with an added input embedding.

    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already
        has the given variables, an error is raised.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer
    self._reuse = reuse

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with _checked_scope(self, scope or "embedding_wrapper", reuse=self._reuse):
      # The embedding table is pinned to CPU.
      with ops.device("/cpu:0"):
        # Initializer priority: explicit arg > enclosing scope's > default.
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        # Derive the embedding dtype from the state.  For a raw tuple take
        # the first element's dtype; otherwise the state object itself is
        # expected to expose .dtype (presumably e.g. an LSTMStateTuple --
        # NOTE(review): confirm all state types used here provide .dtype).
        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        # Flatten inputs so each entry is looked up as a symbol id.
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""

  def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples,
        where `n = len(cells)`.  If False, the states are all
        concatenated along the column axis.  This latter behavior will
        soon be deprecated.

    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the
        cells returns a state tuple but the flag `state_is_tuple` is
        `False`.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    if not nest.is_sequence(cells):
      raise TypeError(
          "cells must be a list or tuple, but saw: %s." % cells)
    self._cells = cells
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
      # A cell whose state is itself a tuple cannot be flattened into a
      # single concatenated state column, so reject that combination.
      if any(nest.is_sequence(c.state_size) for c in self._cells):
        raise ValueError("Some cells return tuples of states, but the flag "
                         "state_is_tuple is not set.  State sizes are: %s"
                         % str([c.state_size for c in self._cells]))

  @property
  def state_size(self):
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    else:
      return sum([cell.state_size for cell in self._cells])

  @property
  def output_size(self):
    # The stack's output is the last layer's output.
    return self._cells[-1].output_size

  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with vs.variable_scope(scope or "multi_rnn_cell"):
      cur_state_pos = 0  # column offset into a concatenated (non-tuple) state
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with vs.variable_scope("cell_%d" % i):
          if self._state_is_tuple:
            if not nest.is_sequence(state):
              raise ValueError(
                  "Expected state to be a tuple of length %d, but received: %s"
                  % (len(self.state_size), state))
            cur_state = state[i]
          else:
            # Slice this cell's columns out of the concatenated state.
            cur_state = array_ops.slice(
                state, [0, cur_state_pos], [-1, cell.state_size])
            cur_state_pos += cell.state_size
          # Each layer's output feeds the next layer's input.
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    new_states = (tuple(new_states) if self._state_is_tuple else
                  array_ops.concat(new_states, 1))
    return cur_inp, new_states
class _SlimRNNCell(RNNCell):
  """A simple wrapper for slim.rnn_cells."""

  def __init__(self, cell_fn):
    """Create a SlimRNNCell from a cell_fn.

    Args:
      cell_fn: a function which takes (inputs, state, scope) and produces
        the outputs and the new_state.  Additionally when called with
        inputs=None and state=None it should return
        (initial_outputs, initial_state).

    Raises:
      TypeError: if cell_fn is not callable
      ValueError: if cell_fn cannot produce a valid initial state.
    """
    if not callable(cell_fn):
      # Interpolate the message explicitly: the original passed
      # ("...%s...", cell_fn) as two TypeError args (logging style), so
      # the placeholder was never expanded in the raised message.
      raise TypeError("cell_fn %s needs to be callable" % (cell_fn,))
    self._cell_fn = cell_fn
    # NOTE(review): assumes cell_fn is a functools.partial (exposes
    # `.func`); a plain function would raise AttributeError here --
    # confirm against callers.
    self._cell_name = cell_fn.func.__name__
    # Probe the cell once with (None, None) to discover the static
    # output/state sizes.
    init_output, init_state = self._cell_fn(None, None)
    output_shape = init_output.get_shape()
    state_shape = init_state.get_shape()
    self._output_size = output_shape.with_rank(2)[1].value
    self._state_size = state_shape.with_rank(2)[1].value
    if self._output_size is None:
      raise ValueError("Initial output created by %s has invalid shape %s" %
                       (self._cell_name, output_shape))
    if self._state_size is None:
      raise ValueError("Initial state created by %s has invalid shape %s" %
                       (self._cell_name, state_shape))

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    # Default the scope to the wrapped function's name so variables are
    # grouped per cell type.
    scope = scope or self._cell_name
    output, state = self._cell_fn(inputs, state, scope=scope)
    return output, state
def _linear(args, output_size, bias, bias_start=0.0):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  # Normalize a single tensor to a one-element list.
  if not nest.is_sequence(args):
    args = [args]

  # Calculate the total size of arguments on dimension 1.
  total_arg_size = 0
  shapes = [a.get_shape() for a in args]
  for shape in shapes:
    if shape.ndims != 2:
      raise ValueError("linear is expecting 2D arguments: %s" % shapes)
    if shape[1].value is None:
      raise ValueError("linear expects shape[1] to be provided for shape %s, "
                       "but saw %s" % (shape, shape[1]))
    else:
      total_arg_size += shape[1].value

  dtype = [a.dtype for a in args][0]

  # Now the computation.
  scope = vs.get_variable_scope()
  with vs.variable_scope(scope) as outer_scope:
    weights = vs.get_variable(
        _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
    if len(args) == 1:
      res = math_ops.matmul(args[0], weights)
    else:
      # One matmul over the concatenated inputs is equivalent to summing
      # per-argument matmuls against row-blocks of `weights`.
      res = math_ops.matmul(array_ops.concat(args, 1), weights)
    if not bias:
      return res
    # The bias lives in a sub-scope with the partitioner cleared,
    # presumably because a 1-D bias should not be sharded -- it must be
    # created whole even when the enclosing scope partitions variables.
    with vs.variable_scope(outer_scope) as inner_scope:
      inner_scope.set_partitioner(None)
      biases = vs.get_variable(
          _BIAS_VARIABLE_NAME, [output_size],
          dtype=dtype,
          initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
      return nn_ops.bias_add(res, biases)
| apache-2.0 |
vkmguy/Flights-and-Hotels | functionality/flight_booking.py | 2 | 2081 | '''
Created on Aug 12, 2015
@author: sadhna01
'''
from classes.flight_booking import FlightBooking
from validations import flight_booking_validation
from database import book_flightDB
from functionality import search_hotel
from exceptions import CustomExceptions
def flight_booking(flightid=None):
    '''Prompt the user for booking details, validate them, persist the
    booking and optionally chain into a hotel search.

    Args:
        flightid: optional flight id; when None the user is prompted for it.
    '''
    # Initialise BEFORE the try block: the finally clause reads `fare`,
    # and previously an exception raised while reading input (e.g. EOF)
    # left it unbound, turning the retry into a NameError.
    fare = 0
    try:
        if flightid is None:
            print("Enter Flight Id:")
            flightid = input()
        print("Enter date of travel:")
        date_of_travel = input()
        print("Enter number of children:")
        no_of_children = input()
        print("Enter number of adults")
        no_of_adults = input()
        print("Enter name of primary passenger")
        primary_passenger = input()
        # Validation returns the computed fare (raises on bad input).
        fare = flight_booking_validation.validate_flights(
            flightid, date_of_travel, no_of_children, no_of_adults,
            primary_passenger)
        print("Total fare:", fare)
        # Booking ids are "F<n>" where n continues from the stored count.
        count = book_flightDB.retrieve_count()
        bookingid = "F" + str(count + 1)
        booking = FlightBooking()
        booking.set_bookingid(bookingid)
        booking.set_flight_id(flightid)
        booking.set_date_of_travel(date_of_travel)
        booking.set_no_of_children(no_of_children)
        booking.set_no_of_adults(no_of_adults)
        booking.set_primary_passenger(primary_passenger)
        booking.set_fare(fare)
        book_flightDB.bookFlight(booking)
        print("Your ticket is successfully booked with booking id", bookingid)
        print("Do you wish to search a hotel? Enter 'Y'or 'N'")
        ch = input()
        if ch == 'Y':
            search_hotel.search_hotel(bookingid)
    except (CustomExceptions.InvalidDateException,
            CustomExceptions.InvalidFlightIdException,
            CustomExceptions.InvalidChildrenException,
            CustomExceptions.InvalidAdultsException,
            CustomExceptions.InvalidPrimaryPassenger) as e:
        # All domain validation errors are reported identically.
        print(e)
    except Exception as e:
        # Top-level boundary for this console flow: report and retry.
        print(e)
    finally:
        # A zero fare means validation/booking never completed: retry the
        # whole dialogue from scratch.
        if fare == 0:
            flight_booking()
| epl-1.0 |
s142857/servo | tests/wpt/harness/wptrunner/wptmanifest/backends/static.py | 190 | 6645 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import operator
from ..node import NodeVisitor
from ..parser import parse
class Compiler(NodeVisitor):
    """Compiler backend that evaluates conditional expressions
    to give static output"""

    def compile(self, tree, expr_data, data_cls_getter=None, **kwargs):
        """Compile a raw AST into a form with conditional expressions
        evaluated.

        tree - The root node of the wptmanifest AST to compile

        expr_data - A dictionary of key / value pairs to use when
                    evaluating conditional expressions

        data_cls_getter - A function taking two parameters; the previous
                          output node and the current ast node and returning
                          the class of the output node to use for the current
                          ast node
        """
        self._kwargs = kwargs
        self.expr_data = expr_data

        if data_cls_getter is None:
            # Default: every AST data node becomes a plain ManifestItem.
            self.data_cls_getter = lambda x, y: ManifestItem
        else:
            self.data_cls_getter = data_cls_getter

        self.output_node = None
        self.visit(tree)
        return self.output_node

    def visit_DataNode(self, node):
        # Push a new output node, visit the children with it current, then
        # attach it to its parent and pop back.
        output_parent = self.output_node
        if self.output_node is None:
            # Root node: constructed with the extra compile() kwargs.
            assert node.parent is None
            self.output_node = self.data_cls_getter(None, None)(None, **self._kwargs)
        else:
            self.output_node = self.data_cls_getter(self.output_node, node)(node.data)

        for child in node.children:
            self.visit(child)

        if output_parent is not None:
            output_parent.append(self.output_node)
            self.output_node = self.output_node.parent

    def visit_KeyValueNode(self, node):
        key_name = node.data
        key_value = None
        # Children are candidate values (possibly conditional); the first
        # one whose visit yields a non-None value wins.
        for child in node.children:
            value = self.visit(child)
            if value is not None:
                key_value = value
                break
        if key_value is not None:
            self.output_node.set(key_name, key_value)

    def visit_ValueNode(self, node):
        return node.data

    def visit_AtomNode(self, node):
        return node.data

    def visit_ListNode(self, node):
        return [self.visit(child) for child in node.children]

    def visit_ConditionalNode(self, node):
        # children[0] is the condition, children[1] the value; yield the
        # value only when the condition evaluates truthy.
        assert len(node.children) == 2
        if self.visit(node.children[0]):
            return self.visit(node.children[1])

    def visit_StringNode(self, node):
        # Children are transformations (e.g. indexing) applied in order.
        value = node.data
        for child in node.children:
            value = self.visit(child)(value)
        return value

    def visit_NumberNode(self, node):
        if "." in node.data:
            return float(node.data)
        else:
            return int(node.data)

    def visit_VariableNode(self, node):
        # Variables are looked up in the expr_data supplied to compile().
        value = self.expr_data[node.data]
        for child in node.children:
            value = self.visit(child)(value)
        return value

    def visit_IndexNode(self, node):
        # Returns a transformation: subscript its argument by the child value.
        assert len(node.children) == 1
        index = self.visit(node.children[0])
        return lambda x: x[index]

    def visit_UnaryExpressionNode(self, node):
        assert len(node.children) == 2
        operator = self.visit(node.children[0])
        operand = self.visit(node.children[1])
        return operator(operand)

    def visit_BinaryExpressionNode(self, node):
        assert len(node.children) == 3
        operator = self.visit(node.children[0])
        operand_0 = self.visit(node.children[1])
        operand_1 = self.visit(node.children[2])
        return operator(operand_0, operand_1)

    def visit_UnaryOperatorNode(self, node):
        return {"not": operator.not_}[node.data]

    def visit_BinaryOperatorNode(self, node):
        return {"and": operator.and_,
                "or": operator.or_,
                "==": operator.eq,
                "!=": operator.ne}[node.data]
class ManifestItem(object):
    """Node in the compiled manifest tree.

    Key lookups fall back from the node itself to the tree root, so
    values set on the root act as defaults for every descendant.
    """

    def __init__(self, name, **kwargs):
        self.parent = None
        self.name = name
        self.children = []
        self._data = {}

    def __repr__(self):
        return "<ManifestItem %s>" % (self.name)

    def __str__(self):
        lines = [repr(self)]
        for child in self.children:
            for line in str(child).split("\n"):
                lines.append(" %s" % line)
        return "\n".join(lines)

    @property
    def is_empty(self):
        # Empty means: no data here and no data anywhere below.
        if self._data:
            return False
        return all(child.is_empty for child in self.children)

    @property
    def root(self):
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def has_key(self, key):
        return any(key in node._data for node in (self, self.root))

    def get(self, key):
        # Local value first, then the root's default.
        for node in (self, self.root):
            if key in node._data:
                return node._data[key]
        raise KeyError

    def set(self, name, value):
        self._data[name] = value

    def remove(self):
        if self.parent:
            self.parent._remove_child(self)

    def _remove_child(self, child):
        self.children.remove(child)
        child.parent = None

    def iterchildren(self, name=None):
        for child in self.children:
            if name is None or child.name == name:
                yield child

    def _flatten(self):
        # Merge local data over root defaults (local entries win).
        flat = {}
        for node in (self, self.root):
            for key, value in node._data.iteritems():
                if key not in flat:
                    flat[key] = value
        return flat

    def iteritems(self):
        for item in self._flatten().iteritems():
            yield item

    def iterkeys(self):
        for key in self._flatten().iterkeys():
            yield key

    def itervalues(self):
        for value in self._flatten().itervalues():
            yield value

    def append(self, child):
        child.parent = self
        self.children.append(child)
        return child
def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs):
    """Statically evaluate an already-parsed wptmanifest AST."""
    compiler = Compiler()
    return compiler.compile(ast, expr_data,
                            data_cls_getter=data_cls_getter,
                            **kwargs)
def compile(stream, expr_data, data_cls_getter=None, **kwargs):
    """Parse a wptmanifest stream and statically evaluate it."""
    ast = parse(stream)
    return compile_ast(ast, expr_data,
                       data_cls_getter=data_cls_getter,
                       **kwargs)
| mpl-2.0 |
chrisy/vpp | test/test_pg.py | 2 | 3031 | #!/usr/bin/env python3
import unittest
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from framework import VppTestCase, VppTestRunner
class TestPgTun(VppTestCase):
    """ PG Test Case """

    def setUp(self):
        super(TestPgTun, self).setUp()

        # create 3 pg interfaces - one each ethernet, ip4-tun, ip6-tun.
        self.create_pg_interfaces(range(0, 1))
        self.pg_interfaces += self.create_pg_ip4_interfaces(range(1, 2))
        self.pg_interfaces += self.create_pg_ip6_interfaces(range(2, 3))

        for i in self.pg_interfaces:
            i.admin_up()

        # pg0 (ethernet) carries both families; pg1 is v4-only, pg2 v6-only.
        for i in [self.pg0, self.pg1]:
            i.config_ip4()
        for i in [self.pg0, self.pg2]:
            i.config_ip6()

        self.pg0.resolve_arp()
        self.pg0.resolve_ndp()

    def tearDown(self):
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.admin_down()
        super(TestPgTun, self).tearDown()

    def test_pg_tun(self):
        """ IP[46] Tunnel Mode PG """

        #
        # test that we can send and receive IP encap'd packets on the
        # tun interfaces
        #
        N_PKTS = 31

        # v4 tun to ethernet: packets injected on the tun side have no
        # Ether header; they should come out of pg0 ethernet-framed.
        p = (IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4) /
             UDP(sport=1234, dport=1234) /
             Raw('0' * 48))

        rxs = self.send_and_expect(self.pg1, p * N_PKTS, self.pg0)
        for rx in rxs:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)

        # v6 tun to ethernet
        p = (IPv6(src=self.pg2.remote_ip6, dst=self.pg0.remote_ip6) /
             UDP(sport=1234, dport=1234) /
             Raw('0' * 48))

        rxs = self.send_and_expect(self.pg2, p * N_PKTS, self.pg0)
        for rx in rxs:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)

        # eth to v4 tun: captures on the tun side are raw IP bytes, so
        # re-parse them as IP and assert no Ether layer is present.
        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
             UDP(sport=1234, dport=1234) /
             Raw('0' * 48))

        rxs = self.send_and_expect(self.pg0, p * N_PKTS, self.pg1)
        for rx in rxs:
            rx = IP(rx)
            self.assertFalse(rx.haslayer(Ether))
            self.assertEqual(rx[IP].dst, self.pg1.remote_ip4)

        # eth to v6 tun
        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg2.remote_ip6) /
             UDP(sport=1234, dport=1234) /
             Raw('0' * 48))

        rxs = self.send_and_expect(self.pg0, p * N_PKTS, self.pg2)
        for rx in rxs:
            rx = IPv6(rx)
            self.assertFalse(rx.haslayer(Ether))
            self.assertEqual(rx[IPv6].dst, self.pg2.remote_ip6)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| apache-2.0 |
cainmatt/django | tests/template_tests/syntax_tests/test_filter_tag.py | 521 | 1795 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FilterTagTests(SimpleTestCase):
    """Tests for the {% filter %} template tag.

    Each @setup decorator registers the inline template under the given
    name; the test then renders it, or asserts it fails to compile.
    """

    @setup({'filter01': '{% filter upper %}{% endfilter %}'})
    def test_filter01(self):
        # An empty filter block renders to the empty string.
        output = self.engine.render_to_string('filter01')
        self.assertEqual(output, '')

    @setup({'filter02': '{% filter upper %}django{% endfilter %}'})
    def test_filter02(self):
        output = self.engine.render_to_string('filter02')
        self.assertEqual(output, 'DJANGO')

    @setup({'filter03': '{% filter upper|lower %}django{% endfilter %}'})
    def test_filter03(self):
        # Chained filters apply left to right: upper then lower.
        output = self.engine.render_to_string('filter03')
        self.assertEqual(output, 'django')

    @setup({'filter04': '{% filter cut:remove %}djangospam{% endfilter %}'})
    def test_filter04(self):
        # Filter arguments may come from the rendering context.
        output = self.engine.render_to_string('filter04', {'remove': 'spam'})
        self.assertEqual(output, 'django')

    # The 'safe' and 'escape' filters are forbidden inside {% filter %}
    # (alone or at the end of a chain): the template must fail to compile.
    @setup({'filter05': '{% filter safe %}fail{% endfilter %}'})
    def test_filter05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter05')

    @setup({'filter05bis': '{% filter upper|safe %}fail{% endfilter %}'})
    def test_filter05bis(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter05bis')

    @setup({'filter06': '{% filter escape %}fail{% endfilter %}'})
    def test_filter06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter06')

    @setup({'filter06bis': '{% filter upper|escape %}fail{% endfilter %}'})
    def test_filter06bis(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter06bis')
| bsd-3-clause |
nealtodd/django | django/core/files/uploadedfile.py | 471 | 4334 | """
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
    """
    An abstract uploaded file (``TemporaryUploadedFile`` and
    ``InMemoryUploadedFile`` are the built-in concrete subclasses).

    An ``UploadedFile`` object behaves somewhat like a file object and
    represents some file data that the user submitted with a form.
    """
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10  # 64 KB

    def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
        super(UploadedFile, self).__init__(file, name)
        self.size = size
        self.content_type = content_type
        self.charset = charset
        self.content_type_extra = content_type_extra

    def __repr__(self):
        return force_str("<%s: %s (%s)>" % (
            self.__class__.__name__, self.name, self.content_type))

    def _get_name(self):
        return self._name

    def _set_name(self, name):
        # Sanitize the file name so that it can't be dangerous.
        if name is not None:
            # Just use the basename of the file -- anything else is dangerous.
            name = os.path.basename(name)

            # File names longer than 255 characters can cause problems on older OSes.
            if len(name) > 255:
                # Truncate the stem, preserving (up to 255 chars of) the extension.
                name, ext = os.path.splitext(name)
                ext = ext[:255]
                name = name[:255 - len(ext)] + ext

        self._name = name

    # Assigning to .name goes through the sanitizer above.
    name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
    """
    A file uploaded to a temporary location (i.e. stream-to-disk).
    """
    def __init__(self, name, content_type, size, charset, content_type_extra=None):
        # Honour FILE_UPLOAD_TEMP_DIR when the project configured one;
        # otherwise fall back to the system default temp directory.
        if settings.FILE_UPLOAD_TEMP_DIR:
            file = tempfile.NamedTemporaryFile(suffix='.upload',
                                               dir=settings.FILE_UPLOAD_TEMP_DIR)
        else:
            file = tempfile.NamedTemporaryFile(suffix='.upload')
        super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)

    def temporary_file_path(self):
        """
        Returns the full path of this file.
        """
        return self.file.name

    def close(self):
        try:
            return self.file.close()
        except OSError as e:
            # ENOENT is tolerated: the file was already gone.
            if e.errno != errno.ENOENT:
                # Means the file was moved or deleted before the tempfile
                # could unlink it.  Still sets self.file.close_called and
                # calls self.file.file.close() before the exception
                raise
class InMemoryUploadedFile(UploadedFile):
    """
    A file uploaded into memory (i.e. stream-to-memory).
    """
    def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
        super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
        self.field_name = field_name

    def open(self, mode=None):
        # The data is already in memory; "reopening" just rewinds.
        # `mode` is accepted for interface compatibility but ignored.
        self.file.seek(0)

    def chunks(self, chunk_size=None):
        # Everything is in memory, so yield it as one chunk
        # (`chunk_size` is ignored).
        self.file.seek(0)
        yield self.read()

    def multiple_chunks(self, chunk_size=None):
        # Since it's in memory, we'll never have multiple chunks.
        return False
class SimpleUploadedFile(InMemoryUploadedFile):
    """
    A simple representation of a file, which just has content, size, and a name.
    """
    def __init__(self, name, content, content_type='text/plain'):
        # Treat None/empty content as an empty byte string.
        data = content if content else b''
        super(SimpleUploadedFile, self).__init__(
            BytesIO(data), None, name, content_type, len(data), None, None)

    @classmethod
    def from_dict(cls, file_dict):
        """
        Creates a SimpleUploadedFile object from
        a dictionary object with the following keys:
           - filename
           - content-type
           - content
        """
        filename = file_dict['filename']
        body = file_dict['content']
        mime = file_dict.get('content-type', 'text/plain')
        return cls(filename, body, mime)
| bsd-3-clause |
FedoraScientific/salome-smesh | src/SMESH_SWIG/SMESH_shared_modules.py | 1 | 1433 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
"""
"""
# force SMESH importation at interpretor initialization
# see salome_shared_modules.py
# (avoids incomplete import at run time)
from launchConfigureParser import verbose
if verbose(): print "============== import SMESH ======================="
import SMESH
# this function is required
def init_shared_modules():
    """
    Initialize shared modules that need to be imported before the SALOME
    session starts.  SMESH needs no initialization beyond the module-level
    import above, so this is a deliberate no-op; the function itself must
    exist because the shared-modules loading protocol calls it (see the
    '# this function is required' note above).
    """
    pass
| lgpl-2.1 |
yeyanchao/calibre | src/calibre/gui2/library/models.py | 1 | 57133 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import functools, re, os, traceback, errno
from collections import defaultdict
from PyQt4.Qt import (QAbstractTableModel, Qt, pyqtSignal, QIcon, QImage,
QModelIndex, QVariant, QDateTime, QColor)
from calibre.gui2 import NONE, UNDEFINED_QDATETIME, error_dialog
from calibre.utils.pyparsing import ParseException
from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config import tweaks, device_prefs
from calibre.utils.date import dt_factory, qt_to_dt, as_local_time
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.library.caches import (_match, CONTAINS_MATCH, EQUALS_MATCH,
REGEXP_MATCH, MetadataBackup, force_to_bool)
from calibre.library.save_to_disk import find_plugboard
from calibre import strftime, isbytestring
from calibre.constants import filesystem_encoding, DEBUG
from calibre.gui2.library import DEFAULT_SORT
from calibre.utils.localization import calibre_langcode_to_name
def human_readable(size, precision=1):
    """ Convert a size in bytes into megabytes """
    fmt = '%%.%df' % precision
    return fmt % (size / (1024. * 1024.))
# strftime format used for date columns, e.g. '01 Jan 2012'.
TIME_FMT = '%d %b %Y'
# Maps the per-column alignment names stored in config to Qt flags.
ALIGNMENT_MAP = {'left': Qt.AlignLeft, 'right': Qt.AlignRight, 'center':
        Qt.AlignHCenter}
# Module-level cache: the default cover is loaded from disk at most once.
_default_image = None
def default_image():
    """Return the (lazily loaded, cached) default cover QImage."""
    global _default_image
    if _default_image is None:
        _default_image = QImage(I('default_cover.png'))
    return _default_image
class BooksModel(QAbstractTableModel): # {{{
    """Qt table model exposing the calibre library database to the book list."""

    # Signals emitted around sort/search/database changes so the rest of the
    # GUI (selection, tag browser, ...) can react.
    about_to_be_sorted = pyqtSignal(object, name='aboutToBeSorted')
    sorting_done = pyqtSignal(object, name='sortingDone')
    database_changed = pyqtSignal(object, name='databaseChanged')
    new_bookdisplay_data = pyqtSignal(object)
    count_changed_signal = pyqtSignal(int)
    searched = pyqtSignal(object)

    # Display names for the standard (non-custom) columns, keyed by lookup name.
    orig_headers = {
                        'title'     : _("Title"),
                        'ondevice'   : _("On Device"),
                        'authors'   : _("Author(s)"),
                        'size'      : _("Size (MB)"),
                        'timestamp' : _("Date"),
                        'pubdate'   : _('Published'),
                        'rating'    : _('Rating'),
                        'publisher' : _("Publisher"),
                        'tags'      : _("Tags"),
                        'series'    : ngettext("Series", 'Series', 1),
                        'last_modified' : _('Modified'),
                        'languages' : _('Languages'),
    }
    def __init__(self, parent=None, buffer=40):
        """Create an empty model; call set_database() before using it.

        :param buffer: read-ahead buffer size, stored as self.buffer_size.
        """
        QAbstractTableModel.__init__(self, parent)
        self.db = None
        self.book_on_device = None
        # Standard columns that may be edited in-place in the book list.
        self.editable_cols = ['title', 'authors', 'rating', 'publisher',
                              'tags', 'series', 'timestamp', 'pubdate',
                              'languages']
        self.default_image = default_image()
        self.sorted_on = DEFAULT_SORT
        self.sort_history = [self.sorted_on]
        self.last_search = '' # The last search performed on this model
        self.column_map = []
        self.headers = {}
        self.alignment_map = {}
        # Per-book cache of computed column colors; invalidated on refresh.
        self.color_cache = defaultdict(dict)
        self.buffer_size = buffer
        self.metadata_backup = None
        self.bool_yes_icon = QIcon(I('ok.png'))
        self.bool_no_icon = QIcon(I('list_remove.png'))
        self.bool_blank_icon = QIcon(I('blank.png'))
        self.device_connected = False
        # State used by highlight-instead-of-filter search mode.
        self.ids_to_highlight = []
        self.ids_to_highlight_set = set()
        self.current_highlighted_idx = None
        self.highlight_only = False
        self.colors = frozenset([unicode(c) for c in QColor.colorNames()])
        self.formatter = SafeFormat()
        self.read_config()
def change_alignment(self, colname, alignment):
if colname in self.column_map and alignment in ('left', 'right', 'center'):
old = self.alignment_map.get(colname, 'left')
if old == alignment:
return
self.alignment_map.pop(colname, None)
if alignment != 'left':
self.alignment_map[colname] = alignment
col = self.column_map.index(colname)
for row in xrange(self.rowCount(QModelIndex())):
self.dataChanged.emit(self.index(row, col), self.index(row,
col))
def is_custom_column(self, cc_label):
return cc_label in self.custom_columns
    def read_config(self):
        """Hook for loading configuration; intentionally a no-op here
        (subclasses/callers may override)."""
        pass
    def set_device_connected(self, is_connected):
        """Record whether a device is currently connected."""
        self.device_connected = is_connected
    def refresh_ondevice(self):
        """Recompute the On Device column, then re-sort and re-search so the
        view reflects the new state."""
        self.db.refresh_ondevice()
        self.resort()
        self.research()
    def set_book_on_device_func(self, func):
        """Install the callable used to test whether a book is on the device."""
        self.book_on_device = func
    def set_database(self, db):
        """Attach a library database and (re)build all column state.

        Clears highlight state, rebuilds the column map (standard + custom
        columns, ordered by their record index in the db), rebuilds the data
        converters, resets the model and restarts the metadata backup thread.
        """
        self.ids_to_highlight = []
        self.ids_to_highlight_set = set()
        self.current_highlighted_idx = None
        self.db = db
        self.custom_columns = self.db.field_metadata.custom_field_metadata()
        self.column_map = list(self.orig_headers.keys()) + \
                          list(self.custom_columns)
        def col_idx(name):
            # 'ondevice' always sorts first; unknown fields sort last.
            if name == 'ondevice':
                return -1
            if name not in self.db.field_metadata:
                return 100000
            return self.db.field_metadata[name]['rec_index']
        self.column_map.sort(cmp=lambda x,y: cmp(col_idx(x), col_idx(y)))
        for col in self.column_map:
            if col in self.orig_headers:
                self.headers[col] = self.orig_headers[col]
            elif col in self.custom_columns:
                self.headers[col] = self.custom_columns[col]['name']
        self.build_data_convertors()
        self.reset()
        self.database_changed.emit(db)
        self.stop_metadata_backup()
        self.start_metadata_backup()
    def start_metadata_backup(self):
        """Start the background thread that writes metadata.opf backups."""
        self.metadata_backup = MetadataBackup(self.db)
        self.metadata_backup.start()
    def stop_metadata_backup(self):
        """Signal the metadata backup thread to stop (without joining it)."""
        if getattr(self, 'metadata_backup', None) is not None:
            self.metadata_backup.stop()
            # Would like to to a join here, but the thread might be waiting to
            # do something on the GUI thread. Deadlock.
def refresh_ids(self, ids, current_row=-1):
self.color_cache = defaultdict(dict)
rows = self.db.refresh_ids(ids)
if rows:
self.refresh_rows(rows, current_row=current_row)
    def refresh_rows(self, rows, current_row=-1):
        """Repaint the given row numbers; if one of them is *current_row*,
        also re-emit the book-details data for it."""
        self.color_cache = defaultdict(dict)
        for row in rows:
            if row == current_row:
                self.new_bookdisplay_data.emit(
                          self.get_book_display_info(row))
            self.dataChanged.emit(self.index(row, 0), self.index(row,
                self.columnCount(QModelIndex())-1))
    def close(self):
        """Close the database connection and empty the model."""
        self.db.close()
        self.db = None
        self.reset()
def add_books(self, paths, formats, metadata, add_duplicates=False,
return_ids=False):
ret = self.db.add_books(paths, formats, metadata,
add_duplicates=add_duplicates, return_ids=return_ids)
self.count_changed()
return ret
    def add_news(self, path, arg):
        """Add a downloaded news recipe result; emits the updated count."""
        ret = self.db.add_news(path, arg)
        self.count_changed()
        return ret
    def add_catalog(self, path, title):
        """Add a generated catalog; emits the updated count."""
        ret = self.db.add_catalog(path, title)
        self.count_changed()
        return ret
    def count_changed(self, *args):
        """Invalidate color cache and broadcast the current book count."""
        self.color_cache = defaultdict(dict)
        self.count_changed_signal.emit(self.db.count())
def row_indices(self, index):
''' Return list indices of all cells in index.row()'''
return [ self.index(index.row(), c) for c in range(self.columnCount(None))]
@property
def by_author(self):
return self.sorted_on[0] == 'authors'
    def books_deleted(self):
        """Notify listeners and reset the model after books were deleted."""
        self.count_changed()
        self.reset()
def delete_books(self, indices, permanent=False):
ids = map(self.id, indices)
self.delete_books_by_id(ids, permanent=permanent)
return ids
    def delete_books_by_id(self, ids, permanent=False):
        """Delete books by database id, deferring cleanup until all are done."""
        for id in ids:
            self.db.delete_book(id, permanent=permanent, do_clean=False)
        self.db.clean()
        self.books_deleted()
    def books_added(self, num):
        """Tell the view *num* rows were inserted at the top."""
        if num > 0:
            self.beginInsertRows(QModelIndex(), 0, num-1)
            self.endInsertRows()
            self.count_changed()
    def set_highlight_only(self, toWhat):
        """When True, searches highlight matches instead of filtering rows."""
        self.highlight_only = toWhat
def get_current_highlighted_id(self):
if len(self.ids_to_highlight) == 0 or self.current_highlighted_idx is None:
return None
try:
return self.ids_to_highlight[self.current_highlighted_idx]
except:
return None
    def get_next_highlighted_id(self, current_row, forward):
        """Return the id of the next/previous highlighted book, wrapping
        around the ends of the list.

        NOTE(review): the while loop assumes at least one highlighted id is
        present among the current rows; if none is, this would spin forever.
        In practice ids_to_highlight comes from db.search over the same rows,
        so a match should always exist — confirm before changing search logic.
        """
        if len(self.ids_to_highlight) == 0 or self.current_highlighted_idx is None:
            return None
        if current_row is None:
            row_ = self.current_highlighted_idx
        else:
            row_ = current_row
        while True:
            row_ += 1 if forward else -1
            if row_ < 0:
                row_ = self.count() - 1;
            elif row_ >= self.count():
                row_ = 0
            if self.id(row_) in self.ids_to_highlight_set:
                break
        try:
            self.current_highlighted_idx = self.ids_to_highlight.index(self.id(row_))
        except:
            # This shouldn't happen ...
            return None
        return self.get_current_highlighted_id()
def highlight_ids(self, ids_to_highlight):
self.ids_to_highlight = ids_to_highlight
self.ids_to_highlight_set = set(self.ids_to_highlight)
if self.ids_to_highlight:
self.current_highlighted_idx = 0
else:
self.current_highlighted_idx = None
self.reset()
    def search(self, text, reset=True):
        """Run a search over the library.

        In highlight mode the view keeps showing all books and the matches
        are recorded in ids_to_highlight; otherwise the db is filtered.
        Emits searched(msg) with the parse error on bad queries, and
        searched(True) after a successful non-empty search.
        """
        try:
            if self.highlight_only:
                # Clear any active filter; matches only get highlighted.
                self.db.search('')
                if not text:
                    self.ids_to_highlight = []
                    self.ids_to_highlight_set = set()
                    self.current_highlighted_idx = None
                else:
                    self.ids_to_highlight = self.db.search(text, return_matches=True)
                    self.ids_to_highlight_set = set(self.ids_to_highlight)
                    if self.ids_to_highlight:
                        self.current_highlighted_idx = 0
                    else:
                        self.current_highlighted_idx = None
            else:
                self.ids_to_highlight = []
                self.ids_to_highlight_set = set()
                self.current_highlighted_idx = None
                self.db.search(text)
        except ParseException as e:
            self.searched.emit(e.msg)
            return
        self.last_search = text
        if reset:
            self.reset()
        if self.last_search:
            # Do not issue search done for the null search. It is used to clear
            # the search and count records for restrictions
            self.searched.emit(True)
def sort(self, col, order, reset=True):
if not self.db:
return
if not isinstance(order, bool):
order = order == Qt.AscendingOrder
label = self.column_map[col]
self._sort(label, order, reset)
def sort_by_named_field(self, field, order, reset=True):
if field in self.db.field_metadata.keys():
self._sort(field, order, reset)
    def _sort(self, label, order, reset):
        """Perform the actual db sort, record it in the sort history, and
        emit the surrounding aboutToBeSorted/sortingDone signals."""
        self.about_to_be_sorted.emit(self.db.id)
        self.db.sort(label, order)
        if reset:
            self.reset()
        self.sorted_on = (label, order)
        self.sort_history.insert(0, self.sorted_on)
        self.sorting_done.emit(self.db.index)
    def refresh(self, reset=True):
        """Reload all rows from the db and restore the current sort."""
        self.db.refresh(field=None)
        self.resort(reset=reset)
    def reset(self):
        """Reset the model, invalidating the color cache first."""
        self.color_cache = defaultdict(dict)
        QAbstractTableModel.reset(self)
    def resort(self, reset=True):
        """Re-apply the stacked sort history (bounded by the tweak)."""
        if not self.db:
            return
        self.db.multisort(self.sort_history[:tweaks['maximum_resort_levels']])
        if reset:
            self.reset()
    def research(self, reset=True):
        """Re-run the last search (used after data changes)."""
        self.search(self.last_search, reset=reset)
def columnCount(self, parent):
if parent and parent.isValid():
return 0
return len(self.column_map)
def rowCount(self, parent):
if parent and parent.isValid():
return 0
return len(self.db.data) if self.db else 0
    def count(self):
        """Convenience wrapper: total number of rows."""
        return self.rowCount(None)
    def get_book_display_info(self, idx):
        """Return a Metadata object for row *idx*, augmented with the fields
        the book-details panel needs (size, cover data, id, path)."""
        mi = self.db.get_metadata(idx)
        mi.size = mi.book_size
        mi.cover_data = ('jpg', self.cover(idx))
        mi.id = self.db.id(idx)
        mi.field_metadata = self.db.field_metadata
        mi.path = self.db.abspath(idx, create_dirs=False)
        return mi
    def current_changed(self, current, previous, emit_signal=True):
        """Slot for selection changes: either emit the book-details data for
        the new current row, or return it when emit_signal is False.
        Returns None for an invalid index."""
        if current.isValid():
            idx = current.row()
            data = self.get_book_display_info(idx)
            if emit_signal:
                self.new_bookdisplay_data.emit(data)
            else:
                return data
def get_book_info(self, index):
if isinstance(index, int):
index = self.index(index, 0)
# If index is not valid returns None
data = self.current_changed(index, None, False)
return data
def metadata_for(self, ids, get_cover=True):
'''
WARNING: if get_cover=True temp files are created for mi.cover.
Remember to delete them once you are done with them.
'''
ans = []
for id in ids:
mi = self.db.get_metadata(id, index_is_id=True, get_cover=get_cover)
ans.append(mi)
return ans
    def get_metadata(self, rows, rows_are_ids=False, full_metadata=False):
        """Return a list of lightweight info dicts (title/authors/cover/tags/
        comments) for the given rows or ids; with full_metadata=True also
        return the full Metadata objects as a second list."""
        metadata, _full_metadata = [], []
        if not rows_are_ids:
            rows = [self.db.id(row.row()) for row in rows]
        for id in rows:
            mi = self.db.get_metadata(id, index_is_id=True)
            _full_metadata.append(mi)
            au = authors_to_string(mi.authors if mi.authors else [_('Unknown')])
            # The series name is folded into the tag list for display purposes.
            tags = mi.tags if mi.tags else []
            if mi.series is not None:
                tags.append(mi.series)
            info = {
                  'title'   : mi.title,
                  'authors' : au,
                  'author_sort' : mi.author_sort,
                  'cover'   : self.db.cover(id, index_is_id=True),
                  'tags'    : tags,
                  'comments': mi.comments,
                  }
            if mi.series is not None:
                info['tag order'] = {
                    mi.series:self.db.books_in_series_of(id, index_is_id=True)
                }
            metadata.append(info)
        if full_metadata:
            return metadata, _full_metadata
        else:
            return metadata
    def get_preferred_formats_from_ids(self, ids, formats,
                              set_metadata=False, specific_format=None,
                              exclude_auto=False, mode='r+b',
                              use_plugboard=None, plugboard_formats=None):
        """For each book id, copy the first available format from *formats*
        into a temp file, optionally writing updated metadata (via plugboard
        if configured) into it.

        Returns (paths, need_auto): *paths* are absolute temp-file paths
        (None placeholders for format-less books unless exclude_auto);
        *need_auto* lists ids that had none of the requested formats.
        """
        from calibre.ebooks.metadata.meta import set_metadata as _set_metadata
        ans = []
        need_auto = []
        if specific_format is not None:
            formats = [specific_format.lower()]
        for id in ids:
            format = None
            fmts = self.db.formats(id, index_is_id=True)
            if not fmts:
                fmts = ''
            db_formats = set(fmts.lower().split(','))
            available_formats = set([f.lower() for f in formats])
            u = available_formats.intersection(db_formats)
            # Preserve the caller's preference order when picking the format.
            for f in formats:
                if f.lower() in u:
                    format = f
                    break
            if format is not None:
                pt = PersistentTemporaryFile(suffix='caltmpfmt.'+format)
                self.db.copy_format_to(id, format, pt, index_is_id=True)
                pt.seek(0)
                if set_metadata:
                    try:
                        mi = self.db.get_metadata(id, get_cover=True,
                                                  index_is_id=True,
                                                  cover_as_data=True)
                        newmi = None
                        if use_plugboard and format.lower() in plugboard_formats:
                            plugboards = self.db.prefs.get('plugboards', {})
                            cpb = find_plugboard(use_plugboard, format.lower(),
                                                 plugboards)
                            if cpb:
                                newmi = mi.deepcopy_metadata()
                                newmi.template_to_attribute(mi, cpb)
                        if newmi is not None:
                            _set_metadata(pt, newmi, format)
                        else:
                            _set_metadata(pt, mi, format)
                    except:
                        traceback.print_exc()
                pt.close()
                def to_uni(x):
                    # Paths from the filesystem may be bytes; normalize.
                    if isbytestring(x):
                        x = x.decode(filesystem_encoding)
                    return x
                ans.append(to_uni(os.path.abspath(pt.name)))
            else:
                need_auto.append(id)
                if not exclude_auto:
                    ans.append(None)
        return ans, need_auto
def get_preferred_formats(self, rows, formats, paths=False,
set_metadata=False, specific_format=None,
exclude_auto=False):
from calibre.ebooks.metadata.meta import set_metadata as _set_metadata
ans = []
need_auto = []
if specific_format is not None:
formats = [specific_format.lower()]
for row in (row.row() for row in rows):
format = None
fmts = self.db.formats(row)
if not fmts:
fmts = ''
db_formats = set(fmts.lower().split(','))
available_formats = set([f.lower() for f in formats])
u = available_formats.intersection(db_formats)
for f in formats:
if f.lower() in u:
format = f
break
if format is not None:
pt = PersistentTemporaryFile(suffix='.'+format)
self.db.copy_format_to(id, format, pt, index_is_id=True)
pt.seek(0)
if set_metadata:
_set_metadata(pt, self.db.get_metadata(row, get_cover=True,
cover_as_data=True), format)
pt.close() if paths else pt.seek(0)
ans.append(pt)
else:
need_auto.append(row)
if not exclude_auto:
ans.append(None)
return ans, need_auto
def id(self, row):
return self.db.id(getattr(row, 'row', lambda:row)())
    def authors(self, row_number):
        """Return the raw authors value for *row_number*."""
        return self.db.authors(row_number)
    def title(self, row_number):
        """Return the title for *row_number*."""
        return self.db.title(row_number)
def rating(self, row_number):
ans = self.db.rating(row_number)
ans = ans/2 if ans else 0
return int(ans)
    def cover(self, row_number):
        """Return the cover as a QImage, falling back to the default cover
        when the book has none or the data fails to load."""
        data = None
        try:
            data = self.db.cover(row_number)
        except IndexError: # Happens if database has not yet been refreshed
            pass
        if not data:
            return self.default_image
        img = QImage()
        img.loadFromData(data)
        if img.isNull():
            img = self.default_image
        return img
    def build_data_convertors(self):
        """Build self.dc / self.dc_decorator: per-column converters from a
        row number to the QVariant (or icon) displayed in that cell, for the
        standard columns and every custom column. Also builds the positional
        maps column_to_dc_map / column_to_dc_decorator_map used by data()."""
        def authors(r, idx=-1):
            au = self.db.data[r][idx]
            if au:
                # db stores authors comma-separated with ',' escaped as '|'
                au = [a.strip().replace('|', ',') for a in au.split(',')]
                return QVariant(' & '.join(au))
            else:
                return None

        def languages(r, idx=-1):
            lc = self.db.data[r][idx]
            if lc:
                langs = [calibre_langcode_to_name(l.strip()) for l in lc.split(',')]
                return QVariant(', '.join(langs))
            return None

        def tags(r, idx=-1):
            tags = self.db.data[r][idx]
            if tags:
                return QVariant(', '.join(sorted(tags.split(','), key=sort_key)))
            return None

        def series_type(r, idx=-1, siix=-1):
            # Display as 'Series name [index]'.
            series = self.db.data[r][idx]
            if series:
                idx = fmt_sidx(self.db.data[r][siix])
                return QVariant(series + ' [%s]'%idx)
            return None

        def size(r, idx=-1):
            size = self.db.data[r][idx]
            if size:
                ans = '%.1f'%(float(size)/(1024*1024))
                if size > 0 and ans == '0.0':
                    ans = '<0.1'
                return QVariant(ans)
            return None

        def rating_type(r, idx=-1):
            # Half-stars (0-10) in the db, stars (0-5) in the view.
            r = self.db.data[r][idx]
            r = r/2.0 if r else 0
            return QVariant(int(r))

        def datetime_type(r, idx=-1):
            val = self.db.data[r][idx]
            if val is not None:
                return QVariant(QDateTime(as_local_time(val)))
            else:
                return QVariant(UNDEFINED_QDATETIME)

        def bool_type(r, idx=-1):
            return None # displayed using a decorator

        def bool_type_decorator(r, idx=-1, bool_cols_are_tristate=True):
            val = force_to_bool(self.db.data[r][idx])
            if not bool_cols_are_tristate:
                # Two-state: None counts as False.
                if val is None or not val:
                    return self.bool_no_icon
            if val:
                return self.bool_yes_icon
            if val is None:
                return self.bool_blank_icon
            return self.bool_no_icon

        def ondevice_decorator(r, idx=-1):
            text = self.db.data[r][idx]
            if text:
                return self.bool_yes_icon
            return self.bool_blank_icon

        def text_type(r, mult=None, idx=-1):
            text = self.db.data[r][idx]
            if text and mult:
                # Multi-valued custom columns: re-join sorted items with the
                # column's configured UI separator.
                jv = mult['list_to_ui']
                sv = mult['cache_to_list']
                return QVariant(jv.join(
                    sorted([t.strip() for t in text.split(sv)], key=sort_key)))
            return QVariant(text)

        def decorated_text_type(r, idx=-1):
            text = self.db.data[r][idx]
            # Bool-like values are shown as icons by the decorator instead.
            if force_to_bool(text) is not None:
                return None
            return QVariant(text)

        def number_type(r, idx=-1, fmt=None):
            if fmt is not None:
                try:
                    return QVariant(fmt.format(self.db.data[r][idx]))
                except:
                    pass
            return QVariant(self.db.data[r][idx])

        self.dc = {
                   'title'    : functools.partial(text_type,
                                idx=self.db.field_metadata['title']['rec_index'], mult=None),
                   'authors'  : functools.partial(authors,
                                idx=self.db.field_metadata['authors']['rec_index']),
                   'size'     : functools.partial(size,
                                idx=self.db.field_metadata['size']['rec_index']),
                   'timestamp': functools.partial(datetime_type,
                                idx=self.db.field_metadata['timestamp']['rec_index']),
                   'pubdate'  : functools.partial(datetime_type,
                                idx=self.db.field_metadata['pubdate']['rec_index']),
                   'last_modified': functools.partial(datetime_type,
                                idx=self.db.field_metadata['last_modified']['rec_index']),
                   'rating'   : functools.partial(rating_type,
                                idx=self.db.field_metadata['rating']['rec_index']),
                   'publisher': functools.partial(text_type,
                                idx=self.db.field_metadata['publisher']['rec_index'], mult=None),
                   'tags'     : functools.partial(tags,
                                idx=self.db.field_metadata['tags']['rec_index']),
                   'series'   : functools.partial(series_type,
                                idx=self.db.field_metadata['series']['rec_index'],
                                siix=self.db.field_metadata['series_index']['rec_index']),
                   'ondevice' : functools.partial(text_type,
                                idx=self.db.field_metadata['ondevice']['rec_index'], mult=None),
                   'languages': functools.partial(languages,
                                idx=self.db.field_metadata['languages']['rec_index']),
                   }
        self.dc_decorator = {
                'ondevice':functools.partial(ondevice_decorator,
                    idx=self.db.field_metadata['ondevice']['rec_index']),
                }
        # Add the custom columns to the data converters
        for col in self.custom_columns:
            idx = self.custom_columns[col]['rec_index']
            datatype = self.custom_columns[col]['datatype']
            if datatype in ('text', 'comments', 'composite', 'enumeration'):
                mult=self.custom_columns[col]['is_multiple']
                self.dc[col] = functools.partial(text_type, idx=idx, mult=mult)
                if datatype in ['text', 'composite', 'enumeration'] and not mult:
                    if self.custom_columns[col]['display'].get('use_decorations', False):
                        self.dc[col] = functools.partial(decorated_text_type, idx=idx)
                        self.dc_decorator[col] = functools.partial(
                                            bool_type_decorator, idx=idx,
                                            bool_cols_are_tristate=
                                                self.db.prefs.get('bools_are_tristate'))
            elif datatype in ('int', 'float'):
                fmt = self.custom_columns[col]['display'].get('number_format', None)
                self.dc[col] = functools.partial(number_type, idx=idx, fmt=fmt)
            elif datatype == 'datetime':
                self.dc[col] = functools.partial(datetime_type, idx=idx)
            elif datatype == 'bool':
                self.dc[col] = functools.partial(bool_type, idx=idx)
                self.dc_decorator[col] = functools.partial(
                                            bool_type_decorator, idx=idx,
                                            bool_cols_are_tristate=
                                                self.db.prefs.get('bools_are_tristate'))
            elif datatype == 'rating':
                self.dc[col] = functools.partial(rating_type, idx=idx)
            elif datatype == 'series':
                self.dc[col] = functools.partial(series_type, idx=idx,
                    siix=self.db.field_metadata.cc_series_index_column_for(col))
            else:
                print 'What type is this?', col, datatype
        # build a index column to data converter map, to remove the string lookup in the data loop
        self.column_to_dc_map = []
        self.column_to_dc_decorator_map = []
        for col in self.column_map:
            self.column_to_dc_map.append(self.dc[col])
            self.column_to_dc_decorator_map.append(self.dc_decorator.get(col, None))
    def data(self, index, role):
        """Qt hook: supply display text, colors, icons and alignment for a
        cell, dispatching through the converters built in
        build_data_convertors()."""
        col = index.column()
        # in obscure cases where custom columns are both edited and added, for a time
        # the column map does not accurately represent the screen. In these cases,
        # we will get asked to display columns we don't know about. Must test for this.
        if col >= len(self.column_to_dc_map):
            return NONE
        if role in (Qt.DisplayRole, Qt.EditRole, Qt.ToolTipRole):
            return self.column_to_dc_map[col](index.row())
        elif role == Qt.BackgroundRole:
            if self.id(index) in self.ids_to_highlight_set:
                return QVariant(QColor('lightgreen'))
        elif role == Qt.ForegroundRole:
            key = self.column_map[col]
            mi = None
            # User-defined column color rules (template based), cached per book.
            for k, fmt in self.db.prefs['column_color_rules']:
                if k != key:
                    continue
                id_ = self.id(index)
                if id_ in self.color_cache:
                    if key in self.color_cache[id_]:
                        return self.color_cache[id_][key]
                try:
                    if mi is None:
                        mi = self.db.get_metadata(id_, index_is_id=True)
                    color = self.formatter.safe_format(fmt, mi, '', mi)
                    if color in self.colors:
                        color = QColor(color)
                        if color.isValid():
                            color = QVariant(color)
                            self.color_cache[id_][key] = color
                            return color
                except:
                    continue
            # Enumeration custom columns may define per-value colors.
            if self.is_custom_column(key) and \
                        self.custom_columns[key]['datatype'] == 'enumeration':
                cc = self.custom_columns[self.column_map[col]]['display']
                colors = cc.get('enum_colors', [])
                values = cc.get('enum_values', [])
                txt = unicode(index.data(Qt.DisplayRole).toString())
                if len(colors) > 0 and txt in values:
                    try:
                        color = QColor(colors[values.index(txt)])
                        if color.isValid():
                            return QVariant(color)
                    except:
                        pass
            return NONE
        elif role == Qt.DecorationRole:
            if self.column_to_dc_decorator_map[col] is not None:
                return self.column_to_dc_decorator_map[index.column()](index.row())
        elif role == Qt.TextAlignmentRole:
            cname = self.column_map[index.column()]
            ans = Qt.AlignVCenter | ALIGNMENT_MAP[self.alignment_map.get(cname,
                'left')]
            return QVariant(ans)
        #elif role == Qt.ToolTipRole and index.isValid():
        #    if self.column_map[index.column()] in self.editable_cols:
        #        return QVariant(_("Double click to <b>edit</b> me<br><br>"))
        return NONE
    def headerData(self, section, orientation, role):
        """Qt hook: column header text/tooltips; vertical headers show the
        1-based row number (and the book UUID as tooltip in DEBUG builds)."""
        if orientation == Qt.Horizontal:
            if section >= len(self.column_map): # same problem as in data, the column_map can be wrong
                return None
            if role == Qt.ToolTipRole:
                ht = self.column_map[section]
                if ht == 'timestamp': # change help text because users know this field as 'date'
                    ht = 'date'
                return QVariant(_('The lookup/search name is "{0}"').format(ht))
            if role == Qt.DisplayRole:
                return QVariant(self.headers[self.column_map[section]])
            return NONE
        if DEBUG and role == Qt.ToolTipRole and orientation == Qt.Vertical:
            col = self.db.field_metadata['uuid']['rec_index']
            return QVariant(_('This book\'s UUID is "{0}"').format(self.db.data[section][col]))
        if role == Qt.DisplayRole: # orientation is vertical
            return QVariant(section+1)
        return NONE
def flags(self, index):
flags = QAbstractTableModel.flags(self, index)
if index.isValid():
colhead = self.column_map[index.column()]
if colhead in self.editable_cols:
flags |= Qt.ItemIsEditable
elif self.is_custom_column(colhead):
if self.custom_columns[colhead]['is_editable']:
flags |= Qt.ItemIsEditable
return flags
    def set_custom_column_data(self, row, colhead, value):
        """Write an edited QVariant *value* into the custom column *colhead*
        for the book at *row*, converting per the column's datatype.
        Returns True on success, False for an invalid datetime."""
        cc = self.custom_columns[colhead]
        typ = cc['datatype']
        label=self.db.field_metadata.key_to_label(colhead)
        s_index = None
        if typ in ('text', 'comments'):
            val = unicode(value.toString()).strip()
            val = val if val else None
        elif typ == 'enumeration':
            val = unicode(value.toString()).strip()
            if not val:
                val = None
        elif typ == 'bool':
            val = value.toPyObject()
        elif typ == 'rating':
            # Stars (0-5) in the UI, half-stars (0-10) in the db.
            val = value.toInt()[0]
            val = 0 if val < 0 else 5 if val > 5 else val
            val *= 2
        elif typ in ('int', 'float'):
            val = unicode(value.toString()).strip()
            if not val:
                val = None
        elif typ == 'datetime':
            val = value.toDateTime()
            if val.isNull():
                val = None
            else:
                if not val.isValid():
                    return False
                val = qt_to_dt(val, as_utc=False)
        elif typ == 'series':
            val = unicode(value.toString()).strip()
            if val:
                # A trailing '[n]' in the text sets the series index.
                pat = re.compile(r'\[([.0-9]+)\]')
                match = pat.search(val)
                if match is not None:
                    s_index = float(match.group(1))
                    val = pat.sub('', val).strip()
                elif val:
                    # it is OK to leave s_index == None when using 'no_change'
                    if tweaks['series_index_auto_increment'] != 'const' and \
                            tweaks['series_index_auto_increment'] != 'no_change':
                        s_index = self.db.get_next_cc_series_num_for(val,
                                                        label=label, num=None)
        elif typ == 'composite':
            # Editing a composite column edits its template, not book data.
            tmpl = unicode(value.toString()).strip()
            disp = cc['display']
            disp['composite_template'] = tmpl
            self.db.set_custom_column_metadata(cc['colnum'], display = disp)
            self.refresh(reset=True)
            return True
        id = self.db.id(row)
        books_to_refresh = set([id])
        books_to_refresh |= self.db.set_custom(id, val, extra=s_index,
                           label=label, num=None, append=False, notify=True)
        self.refresh_ids(list(books_to_refresh), current_row=row)
        return True
def setData(self, index, value, role):
if role == Qt.EditRole:
from calibre.gui2.ui import get_gui
try:
return self._set_data(index, value)
except (IOError, OSError) as err:
if getattr(err, 'errno', None) == errno.EACCES: # Permission denied
import traceback
error_dialog(get_gui(), _('Permission denied'),
_('Could not change the on disk location of this'
' book. Is it open in another program?'),
det_msg=traceback.format_exc(), show=True)
except:
import traceback
traceback.print_exc()
error_dialog(get_gui(), _('Failed to set data'),
_('Could not set data, click Show Details to see why.'),
det_msg=traceback.format_exc(), show=True)
return False
    def _set_data(self, index, value):
        """Apply an edited QVariant *value* to the cell at *index*, writing
        it through to the database. Returns True on success, False when the
        column is not editable or the value is invalid."""
        row, col = index.row(), index.column()
        column = self.column_map[col]
        if self.is_custom_column(column):
            if not self.set_custom_column_data(row, column, value):
                return False
        else:
            if column not in self.editable_cols:
                return False
            val = (int(value.toInt()[0]) if column == 'rating' else
                    value.toDateTime() if column in ('timestamp', 'pubdate')
                    else unicode(value.toString()).strip())
            id = self.db.id(row)
            books_to_refresh = set([id])
            if column == 'rating':
                # Stars (0-5) in the UI, half-stars (0-10) in the db.
                val = 0 if val < 0 else 5 if val > 5 else val
                val *= 2
                self.db.set_rating(id, val)
            elif column == 'series':
                val = val.strip()
                if not val:
                    # Empty text clears the series and resets the index.
                    books_to_refresh |= self.db.set_series(id, val,
                                                    allow_case_change=True)
                    self.db.set_series_index(id, 1.0)
                else:
                    # A trailing '[n]' in the text sets the series index.
                    pat = re.compile(r'\[([.0-9]+)\]')
                    match = pat.search(val)
                    if match is not None:
                        self.db.set_series_index(id, float(match.group(1)))
                        val = pat.sub('', val).strip()
                    elif val:
                        if tweaks['series_index_auto_increment'] != 'const' and \
                            tweaks['series_index_auto_increment'] != 'no_change':
                            ni = self.db.get_next_series_num_for(val)
                            if ni != 1:
                                self.db.set_series_index(id, ni)
                    if val:
                        books_to_refresh |= self.db.set_series(id, val,
                                                    allow_case_change=True)
            elif column == 'timestamp':
                if val.isNull() or not val.isValid():
                    return False
                self.db.set_timestamp(id, qt_to_dt(val, as_utc=False))
            elif column == 'pubdate':
                if val.isNull() or not val.isValid():
                    return False
                self.db.set_pubdate(id, qt_to_dt(val, as_utc=False))
            elif column == 'languages':
                val = val.split(',')
                self.db.set_languages(id, val)
            else:
                books_to_refresh |= self.db.set(row, column, val,
                                                allow_case_change=True)
            self.refresh_ids(list(books_to_refresh), row)
        self.dataChanged.emit(index, index)
        return True
# }}}
class OnDeviceSearch(SearchQueryParser): # {{{
    """Search-query parser for the device book list: evaluates queries
    against the in-memory device booklist held by a DeviceBooksModel."""

    USABLE_LOCATIONS = [
            'all',
            'author',
            'authors',
            'collections',
            'format',
            'formats',
            'title',
            'inlibrary'
    ]

    def __init__(self, model):
        SearchQueryParser.__init__(self, locations=self.USABLE_LOCATIONS)
        self.model = model

    def universal_set(self):
        """All row indices of the device book list."""
        return set(range(0, len(self.model.db)))

    def get_matches(self, location, query):
        """Return the set of row indices matching *query* in *location*.

        Query prefixes: '=' exact match, '~' regexp, '\\' literal escape;
        default is substring match. 'true'/'false' test field presence.
        """
        location = location.lower().strip()
        if location == 'authors':
            location = 'author'

        matchkind = CONTAINS_MATCH
        if len(query) > 1:
            if query.startswith('\\'):
                query = query[1:]
            elif query.startswith('='):
                matchkind = EQUALS_MATCH
                query = query[1:]
            elif query.startswith('~'):
                matchkind = REGEXP_MATCH
                query = query[1:]
        if matchkind != REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
            query = query.lower()

        if location not in self.USABLE_LOCATIONS:
            return set([])
        matches = set([])
        all_locs = set(self.USABLE_LOCATIONS) - set(['all'])
        locations = all_locs if location == 'all' else [location]
        # Accessors normalizing each searchable field of a device book.
        q = {
             'title' : lambda x : getattr(x, 'title').lower(),
             'author': lambda x: ' & '.join(getattr(x, 'authors')).lower(),
             'collections':lambda x: ','.join(getattr(x, 'device_collections')).lower(),
             'format':lambda x: os.path.splitext(x.path)[1].lower(),
             'inlibrary':lambda x : getattr(x, 'in_library')
             }
        for x in ('author', 'format'):
            q[x+'s'] = q[x]
        for index, row in enumerate(self.model.db):
            for locvalue in locations:
                accessor = q[locvalue]
                if query == 'true':
                    if accessor(row):
                        matches.add(index)
                    continue
                if query == 'false':
                    if not accessor(row):
                        matches.add(index)
                    continue
                if locvalue == 'inlibrary':
                    continue    # this is bool, so can't match below
                try:
                    ### Can't separate authors because comma is used for name sep and author sep
                    ### Exact match might not get what you want. For that reason, turn author
                    ### exactmatch searches into contains searches.
                    if locvalue == 'author' and matchkind == EQUALS_MATCH:
                        m = CONTAINS_MATCH
                    else:
                        m = matchkind

                    if locvalue == 'collections':
                        vals = accessor(row).split(',')
                    else:
                        vals = [accessor(row)]
                    if _match(query, vals, m):
                        matches.add(index)
                        break
                except ValueError: # Unicode errors
                    traceback.print_exc()
        return matches

# }}}
class DeviceDBSortKeyGen(object): # {{{
    """Callable mapping a booklist index to a sort key.

    Applies *keyfunc* to getattr(db[x], attr); any failure (bad index,
    missing attribute, keyfunc error) yields None so sorting never raises.
    """

    def __init__(self, attr, keyfunc, db):
        self.attr = attr
        self.keyfunc = keyfunc
        self.db = db

    def __call__(self, x):
        try:
            value = getattr(self.db[x], self.attr)
            return self.keyfunc(value)
        except:
            return None
# }}}
class DeviceBooksModel(BooksModel): # {{{
    """BooksModel variant backed by the in-memory device booklist (a plain
    list) instead of the library database."""

    booklist_dirtied = pyqtSignal()
    upload_collections = pyqtSignal(object)

    def __init__(self, parent):
        BooksModel.__init__(self, parent)
        self.db  = []
        # self.map maps view rows to booklist indices (search filtering).
        self.map = []
        self.sorted_map = []
        self.sorted_on = DEFAULT_SORT
        self.sort_history = [self.sorted_on]
        self.unknown = _('Unknown')
        self.column_map = ['inlibrary', 'title', 'authors', 'timestamp', 'size',
                'collections']
        self.headers = {
                'inlibrary'   : _('In Library'),
                'title'       : _('Title'),
                'authors'     : _('Author(s)'),
                'timestamp'   : _('Date'),
                'size'        : _('Size'),
                'collections' : _('Collections')
                }
        # job -> list of booklist items queued for deletion by that job.
        self.marked_for_deletion = {}
        self.search_engine = OnDeviceSearch(self)
        self.editable = ['title', 'authors', 'collections']
        self.book_in_library = None
    def mark_for_deletion(self, job, rows, rows_are_ids=False):
        """Record the booklist items being deleted by *job* so their rows can
        be greyed out while the deletion is in flight."""
        db_indices = rows if rows_are_ids else self.indices(rows)
        db_items = [self.db[i] for i in db_indices if -1 < i < len(self.db)]
        self.marked_for_deletion[job] = db_items
        if rows_are_ids:
            self.reset()
        else:
            for row in rows:
                indices = self.row_indices(row)
                self.dataChanged.emit(indices[0], indices[-1])
def find_item_in_db(self, item):
idx = None
try:
idx = self.db.index(item)
except:
path = getattr(item, 'path', None)
if path:
for i, x in enumerate(self.db):
if getattr(x, 'path', None) == path:
idx = i
break
return idx
    def deletion_done(self, job, succeeded=True):
        """Called when deletion *job* finishes; un-grey the affected rows if
        the deletion failed."""
        db_items = self.marked_for_deletion.pop(job, [])
        rows = []
        for item in db_items:
            idx = self.find_item_in_db(item)
            if idx is not None:
                try:
                    rows.append(self.map.index(idx))
                except ValueError:
                    pass

        for row in rows:
            if not succeeded:
                indices = self.row_indices(self.index(row, 0))
                self.dataChanged.emit(indices[0], indices[-1])
    def paths_deleted(self, paths):
        """Rebuild the row map after files were removed from the device."""
        self.map = list(range(0, len(self.db)))
        self.resort(False)
        self.research(True)
def is_row_marked_for_deletion(self, row):
try:
item = self.db[self.map[row]]
except IndexError:
return False
path = getattr(item, 'path', None)
for items in self.marked_for_deletion.itervalues():
for x in items:
if x is item or (path and path == getattr(x, 'path', None)):
return True
return False
def clear_ondevice(self, db_ids, to_what=None):
for data in self.db:
if data is None:
continue
app_id = getattr(data, 'application_id', None)
if app_id is not None and app_id in db_ids:
data.in_library = to_what
self.reset()
def flags(self, index):
if self.is_row_marked_for_deletion(index.row()):
return Qt.NoItemFlags
flags = QAbstractTableModel.flags(self, index)
if index.isValid():
cname = self.column_map[index.column()]
if cname in self.editable and \
(cname != 'collections' or \
(callable(getattr(self.db, 'supports_collections', None)) and \
self.db.supports_collections() and \
device_prefs['manage_device_metadata']=='manual')):
flags |= Qt.ItemIsEditable
return flags
def search(self, text, reset=True):
if not text or not text.strip():
self.map = list(range(len(self.db)))
else:
try:
matches = self.search_engine.parse(text)
except ParseException:
self.searched.emit(False)
return
self.map = []
for i in range(len(self.db)):
if i in matches:
self.map.append(i)
self.resort(reset=False)
if reset:
self.reset()
self.last_search = text
if self.last_search:
self.searched.emit(True)
def research(self, reset=True):
self.search(self.last_search, reset)
def sort(self, col, order, reset=True):
descending = order != Qt.AscendingOrder
cname = self.column_map[col]
def author_key(x):
try:
ax = self.db[x].author_sort
if not ax:
raise Exception('')
except:
try:
ax = authors_to_string(self.db[x].authors)
except:
ax = ''
try:
return sort_key(ax)
except:
return ax
keygen = {
'title': ('title_sorter', lambda x: sort_key(x) if x else ''),
'authors' : author_key,
'size' : ('size', int),
'timestamp': ('datetime', functools.partial(dt_factory, assume_utc=True)),
'collections': ('device_collections', lambda x:sorted(x,
key=sort_key)),
'inlibrary': ('in_library', lambda x: x),
}[cname]
keygen = keygen if callable(keygen) else DeviceDBSortKeyGen(
keygen[0], keygen[1], self.db)
self.map.sort(key=keygen, reverse=descending)
if len(self.map) == len(self.db):
self.sorted_map = list(self.map)
else:
self.sorted_map = list(range(len(self.db)))
self.sorted_map.sort(key=keygen, reverse=descending)
self.sorted_on = (self.column_map[col], order)
self.sort_history.insert(0, self.sorted_on)
if hasattr(keygen, 'db'):
keygen.db = None
if reset:
self.reset()
def resort(self, reset=True):
if self.sorted_on:
self.sort(self.column_map.index(self.sorted_on[0]),
self.sorted_on[1], reset=False)
if reset:
self.reset()
def columnCount(self, parent):
if parent and parent.isValid():
return 0
return len(self.column_map)
def rowCount(self, parent):
if parent and parent.isValid():
return 0
return len(self.map)
def set_database(self, db):
self.custom_columns = {}
self.db = db
self.map = list(range(0, len(db)))
self.research(reset=False)
self.resort()
def cover(self, row):
item = self.db[self.map[row]]
cdata = item.thumbnail
img = QImage()
if cdata is not None:
if hasattr(cdata, 'image_path'):
img.load(cdata.image_path)
elif cdata:
if isinstance(cdata, (tuple, list)):
img.loadFromData(cdata[-1])
else:
img.loadFromData(cdata)
if img.isNull():
img = self.default_image
return img
def get_book_display_info(self, idx):
from calibre.ebooks.metadata.book.base import Metadata
item = self.db[self.map[idx]]
cover = self.cover(idx)
if cover is self.default_image:
cover = None
title = item.title
if not title:
title = _('Unknown')
au = item.authors
if not au:
au = [_('Unknown')]
mi = Metadata(title, au)
mi.cover_data = ('jpg', cover)
fmt = _('Unknown')
ext = os.path.splitext(item.path)[1]
if ext:
fmt = ext[1:].lower()
mi.formats = [fmt]
mi.path = (item.path if item.path else None)
dt = dt_factory(item.datetime, assume_utc=True)
mi.timestamp = dt
mi.device_collections = list(item.device_collections)
mi.tags = list(getattr(item, 'tags', []))
mi.comments = getattr(item, 'comments', None)
series = getattr(item, 'series', None)
if series:
sidx = getattr(item, 'series_index', 0)
mi.series = series
mi.series_index = sidx
return mi
def current_changed(self, current, previous, emit_signal=True):
if current.isValid():
idx = current.row()
data = self.get_book_display_info(idx)
if emit_signal:
self.new_bookdisplay_data.emit(data)
else:
return data
def paths(self, rows):
return [self.db[self.map[r.row()]].path for r in rows ]
def paths_for_db_ids(self, db_ids, as_map=False):
res = defaultdict(list) if as_map else []
for r,b in enumerate(self.db):
if b.application_id in db_ids:
if as_map:
res[b.application_id].append(b)
else:
res.append((r,b))
return res
def get_collections_with_ids(self):
collections = set()
for book in self.db:
if book.device_collections is not None:
collections.update(set(book.device_collections))
self.collections = []
result = []
for i,collection in enumerate(collections):
result.append((i, collection))
self.collections.append(collection)
return result
def rename_collection(self, old_id, new_name):
old_name = self.collections[old_id]
for book in self.db:
if book.device_collections is None:
continue
if old_name in book.device_collections:
book.device_collections.remove(old_name)
if new_name not in book.device_collections:
book.device_collections.append(new_name)
def delete_collection_using_id(self, old_id):
old_name = self.collections[old_id]
for book in self.db:
if book.device_collections is None:
continue
if old_name in book.device_collections:
book.device_collections.remove(old_name)
def indices(self, rows):
'''
Return indices into underlying database from rows
'''
return [self.map[r.row()] for r in rows]
def data(self, index, role):
row, col = index.row(), index.column()
cname = self.column_map[col]
if role == Qt.DisplayRole or role == Qt.EditRole:
if cname == 'title':
text = self.db[self.map[row]].title
if not text:
text = self.unknown
return QVariant(text)
elif cname == 'authors':
au = self.db[self.map[row]].authors
if not au:
au = [_('Unknown')]
return QVariant(authors_to_string(au))
elif cname == 'size':
size = self.db[self.map[row]].size
return QVariant(human_readable(size))
elif cname == 'timestamp':
dt = self.db[self.map[row]].datetime
dt = dt_factory(dt, assume_utc=True, as_utc=False)
return QVariant(strftime(TIME_FMT, dt.timetuple()))
elif cname == 'collections':
tags = self.db[self.map[row]].device_collections
if tags:
tags.sort(key=sort_key)
return QVariant(', '.join(tags))
elif DEBUG and cname == 'inlibrary':
return QVariant(self.db[self.map[row]].in_library)
elif role == Qt.ToolTipRole and index.isValid():
if self.is_row_marked_for_deletion(row):
return QVariant(_('Marked for deletion'))
if cname in ['title', 'authors'] or (cname == 'collections' and \
self.db.supports_collections()):
return QVariant(_("Double click to <b>edit</b> me<br><br>"))
elif role == Qt.DecorationRole and cname == 'inlibrary':
if self.db[self.map[row]].in_library:
return QVariant(self.bool_yes_icon)
elif self.db[self.map[row]].in_library is not None:
return QVariant(self.bool_no_icon)
elif role == Qt.TextAlignmentRole:
cname = self.column_map[index.column()]
ans = Qt.AlignVCenter | ALIGNMENT_MAP[self.alignment_map.get(cname,
'left')]
return QVariant(ans)
return NONE
def headerData(self, section, orientation, role):
if role == Qt.ToolTipRole and orientation == Qt.Horizontal:
return QVariant(_('The lookup/search name is "{0}"').format(self.column_map[section]))
if DEBUG and role == Qt.ToolTipRole and orientation == Qt.Vertical:
return QVariant(_('This book\'s UUID is "{0}"').format(self.db[self.map[section]].uuid))
if role != Qt.DisplayRole:
return NONE
if orientation == Qt.Horizontal:
cname = self.column_map[section]
text = self.headers[cname]
return QVariant(text)
else:
return QVariant(section+1)
def setData(self, index, value, role):
done = False
if role == Qt.EditRole:
row, col = index.row(), index.column()
cname = self.column_map[col]
if cname in ('size', 'timestamp', 'inlibrary'):
return False
val = unicode(value.toString()).strip()
idx = self.map[row]
if cname == 'collections':
tags = [i.strip() for i in val.split(',')]
tags = [t for t in tags if t]
self.db[idx].device_collections = tags
self.dataChanged.emit(index, index)
self.upload_collections.emit(self.db)
return True
if cname == 'title' :
self.db[idx].title = val
elif cname == 'authors':
self.db[idx].authors = string_to_authors(val)
self.dataChanged.emit(index, index)
self.booklist_dirtied.emit()
done = True
return done
def set_editable(self, editable):
# Cannot edit if metadata is sent on connect. Reason: changes will
# revert to what is in the library on next connect.
if isinstance(editable, list):
self.editable = editable
elif editable:
self.editable = ['title', 'authors', 'collections']
else:
self.editable = []
if device_prefs['manage_device_metadata']=='on_connect':
self.editable = []
# }}}
| gpl-3.0 |
jonobrien/School_Backups | cs1-python/Homework/week 8/wtf.py | 2 | 2046 | """
Author: Jon O'Brien
Due: 10/26/13
Assignment: Hashing homework - birthday.py
This program estimates how many people must be in a room before two of them
share a birthday (the "birthday paradox"). A helper function draws random
birthdays one at a time, adding each new value to a set until a duplicate
appears, and returns how many were drawn. Averaging that count over many
trials gives the expected room size needed for a shared birthday.
"""
import random
def makeSet():
    """
    Draw random birthdays until a duplicate occurs.

    Birthdays are integers in [0, 364], one value per day of a 365-day year.
    Each draw is checked against the set of birthdays seen so far; the first
    repeated value ends the trial.

    Returns the number of distinct birthdays drawn before the duplicate.
    """
    seen = set()
    count = 0
    while True:
        # randint is inclusive on both ends, so 0..364 gives exactly 365
        # possible days. (The original 0..365 produced 366 values, which
        # contradicted the documented 365-day year.)
        birthday = random.randint(0, 364)
        if birthday in seen:
            return count
        seen.add(birthday)
        count += 1
def main():
    """
    Prompt for a trial count, validate it, and print the average result.

    Reads an integer from the user. Values outside 10..10000 are rejected
    with a message; otherwise makeSet() is run that many times and the mean
    count per trial is printed.
    """
    tests = int(input("tests (10-10000):"))
    if 10 <= tests <= 10000:
        total = sum(makeSet() for _ in range(tests))
        print(total / tests)
    else:
        print("incorrect value for tests")
main()  # Runs immediately on import/execution; no "__main__" guard in this script.
| gpl-3.0 |
postlund/home-assistant | tests/components/smartthings/test_config_flow.py | 3 | 12829 | """Tests for the SmartThings config flow module."""
from uuid import uuid4
from aiohttp import ClientResponseError
from asynctest import Mock, patch
from pysmartthings import APIResponseError
from homeassistant import data_entry_flow
from homeassistant.components.smartthings import smartapp
from homeassistant.components.smartthings.config_flow import SmartThingsFlowHandler
from homeassistant.components.smartthings.const import (
CONF_INSTALLED_APP_ID,
CONF_INSTALLED_APPS,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DOMAIN,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, mock_coro
async def test_step_user(hass):
    """Test the access token form is shown for a user initiated flow."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # With no user input supplied, the handler must render the token form.
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_step_init(hass):
    """Test the access token form is shown for an import-initiated flow."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # Import flows are routed to the same user step and show the same form.
    result = await flow.async_step_import()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_base_url_not_https(hass):
    """Test an error is shown when base_url does not start with https://."""
    # SmartThings webhooks require an https:// endpoint.
    hass.config.api.base_url = "http://0.0.0.0"
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "base_url_not_https"}
async def test_invalid_token_format(hass):
    """Test an error is shown for invalid token formats."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # Tokens must be UUID-shaped; a plain number is rejected before any API call.
    result = await flow.async_step_user({"access_token": "123456789"})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"access_token": "token_invalid_format"}
async def test_token_already_setup(hass):
    """Test an error is shown when the token is already setup."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    token = str(uuid4())
    # Pre-create a config entry with the same token to trigger the error.
    entry = MockConfigEntry(domain=DOMAIN, data={"access_token": token})
    entry.add_to_hass(hass)
    result = await flow.async_step_user({"access_token": token})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"access_token": "token_already_setup"}
async def test_token_unauthorized(hass, smartthings_mock):
    """Test an error is shown when the token is not authorized."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    request_info = Mock(real_url="http://example.com")
    # A 401 from the apps endpoint maps to the token_unauthorized error.
    smartthings_mock.apps.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=401
    )
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"access_token": "token_unauthorized"}
async def test_token_forbidden(hass, smartthings_mock):
    """Test an error is shown when the token is forbidden."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    request_info = Mock(real_url="http://example.com")
    # A 403 from the apps endpoint maps to the token_forbidden error.
    smartthings_mock.apps.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=403
    )
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"access_token": "token_forbidden"}
async def test_webhook_error(hass, smartthings_mock):
    """Test an error is shown when the webhook endpoint validation fails."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    data = {"error": {}}
    request_info = Mock(real_url="http://example.com")
    error = APIResponseError(
        request_info=request_info, history=None, data=data, status=422
    )
    # is_target_error == True marks the 422 as a webhook validation failure.
    error.is_target_error = Mock(return_value=True)
    smartthings_mock.apps.side_effect = error
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "webhook_error"}
async def test_api_error(hass, smartthings_mock):
    """Test an error is shown when other API errors occur."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    data = {"error": {}}
    request_info = Mock(real_url="http://example.com")
    # A generic 400 (not a webhook 422) falls through to app_setup_error.
    error = APIResponseError(
        request_info=request_info, history=None, data=data, status=400
    )
    smartthings_mock.apps.side_effect = error
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "app_setup_error"}
async def test_unknown_api_error(hass, smartthings_mock):
    """Test the generic setup error is shown for unhandled HTTP statuses."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    request_info = Mock(real_url="http://example.com")
    # 404 is not specially handled, so it maps to app_setup_error.
    smartthings_mock.apps.side_effect = ClientResponseError(
        request_info=request_info, history=None, status=404
    )
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "app_setup_error"}
async def test_unknown_error(hass, smartthings_mock):
    """Test the generic setup error is shown for non-HTTP exceptions."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # Any unexpected exception during app lookup maps to app_setup_error.
    smartthings_mock.apps.side_effect = Exception("Unknown error")
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "app_setup_error"}
async def test_app_created_then_show_wait_form(
    hass, app, app_oauth_client, smartthings_mock
):
    """Test SmartApp is created when one does not exist and shows wait form."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # No existing apps -> the flow must create one and then wait for install.
    smartthings_mock.apps.return_value = []
    smartthings_mock.create_app.return_value = (app, app_oauth_client)
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "wait_install"
async def test_cloudhook_app_created_then_show_wait_form(
    hass, app, app_oauth_client, smartthings_mock
):
    """Test SmartApp is created with a cloudhook and shows the wait form."""
    hass.config.components.add("cloud")
    # Unload the endpoint so we can reload it under the cloud.
    await smartapp.unload_smartapp_endpoint(hass)
    with patch.object(
        hass.components.cloud, "async_active_subscription", return_value=True
    ), patch.object(
        hass.components.cloud,
        "async_create_cloudhook",
        return_value=mock_coro("http://cloud.test"),
    ) as mock_create_cloudhook:
        await smartapp.setup_smartapp_endpoint(hass)
        flow = SmartThingsFlowHandler()
        flow.hass = hass
        smartthings_mock.apps.return_value = []
        smartthings_mock.create_app.return_value = (app, app_oauth_client)
        result = await flow.async_step_user({"access_token": str(uuid4())})
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "wait_install"
        # The webhook must have been registered through the cloud exactly once.
        assert mock_create_cloudhook.call_count == 1
async def test_app_updated_then_show_wait_form(
    hass, app, app_oauth_client, smartthings_mock
):
    """Test SmartApp is updated when an existing is already created."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # An existing app triggers the update path (fresh OAuth client generated).
    smartthings_mock.apps.return_value = [app]
    smartthings_mock.generate_app_oauth.return_value = app_oauth_client
    result = await flow.async_step_user({"access_token": str(uuid4())})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "wait_install"
async def test_wait_form_displayed(hass):
    """Test the wait for installation form is displayed."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    # Passing None (no user input) must simply re-render the wait form.
    result = await flow.async_step_wait_install(None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "wait_install"
async def test_wait_form_displayed_after_checking(hass, smartthings_mock):
    """Test error is shown when the user has not installed the app."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    flow.access_token = str(uuid4())
    # No install event was recorded, so checking must report app_not_installed.
    result = await flow.async_step_wait_install({})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "wait_install"
    assert result["errors"] == {"base": "app_not_installed"}
async def test_config_entry_created_when_installed(
    hass, location, installed_app, smartthings_mock
):
    """Test a config entry is created once the app is installed."""
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    flow.access_token = str(uuid4())
    flow.app_id = installed_app.app_id
    flow.api = smartthings_mock
    flow.oauth_client_id = str(uuid4())
    flow.oauth_client_secret = str(uuid4())
    # Simulate the SmartApp webhook having recorded the install event.
    data = {
        CONF_REFRESH_TOKEN: str(uuid4()),
        CONF_LOCATION_ID: installed_app.location_id,
        CONF_INSTALLED_APP_ID: installed_app.installed_app_id,
    }
    hass.data[DOMAIN][CONF_INSTALLED_APPS].append(data)
    result = await flow.async_step_wait_install({})
    # The pending-install list must be consumed by the flow.
    assert not hass.data[DOMAIN][CONF_INSTALLED_APPS]
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["app_id"] == installed_app.app_id
    assert result["data"]["installed_app_id"] == installed_app.installed_app_id
    assert result["data"]["location_id"] == installed_app.location_id
    assert result["data"]["access_token"] == flow.access_token
    assert result["data"]["refresh_token"] == data[CONF_REFRESH_TOKEN]
    assert result["data"]["client_secret"] == flow.oauth_client_secret
    assert result["data"]["client_id"] == flow.oauth_client_id
    assert result["title"] == location.name
async def test_multiple_config_entry_created_when_installed(
    hass, app, locations, installed_apps, smartthings_mock
):
    """Test a config entries are created for multiple installs."""
    assert await async_setup_component(hass, "persistent_notification", {})
    flow = SmartThingsFlowHandler()
    flow.hass = hass
    flow.access_token = str(uuid4())
    flow.app_id = app.app_id
    flow.api = smartthings_mock
    flow.oauth_client_id = str(uuid4())
    flow.oauth_client_secret = str(uuid4())
    # Record one install event per installed app.
    for installed_app in installed_apps:
        data = {
            CONF_REFRESH_TOKEN: str(uuid4()),
            CONF_LOCATION_ID: installed_app.location_id,
            CONF_INSTALLED_APP_ID: installed_app.installed_app_id,
        }
        hass.data[DOMAIN][CONF_INSTALLED_APPS].append(data)
    install_data = hass.data[DOMAIN][CONF_INSTALLED_APPS].copy()
    result = await flow.async_step_wait_install({})
    assert not hass.data[DOMAIN][CONF_INSTALLED_APPS]
    # The first install is returned directly by the flow...
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["app_id"] == installed_apps[0].app_id
    assert result["data"]["installed_app_id"] == installed_apps[0].installed_app_id
    assert result["data"]["location_id"] == installed_apps[0].location_id
    assert result["data"]["access_token"] == flow.access_token
    assert result["data"]["refresh_token"] == install_data[0][CONF_REFRESH_TOKEN]
    assert result["data"]["client_secret"] == flow.oauth_client_secret
    assert result["data"]["client_id"] == flow.oauth_client_id
    assert result["title"] == locations[0].name
    await hass.async_block_till_done()
    # ...while the second install is imported as an additional config entry.
    entries = hass.config_entries.async_entries("smartthings")
    assert len(entries) == 1
    assert entries[0].data["app_id"] == installed_apps[1].app_id
    assert entries[0].data["installed_app_id"] == installed_apps[1].installed_app_id
    assert entries[0].data["location_id"] == installed_apps[1].location_id
    assert entries[0].data["access_token"] == flow.access_token
    assert entries[0].data["client_secret"] == flow.oauth_client_secret
    assert entries[0].data["client_id"] == flow.oauth_client_id
    assert entries[0].title == locations[1].name
| apache-2.0 |
nhuntwalker/astroML | astroML/decorators.py | 3 | 3743 | from __future__ import print_function
import os
from . import py3k_compat as pickle
import numpy as np
def pickle_results(filename=None, verbose=True):
    """Generator for decorator which allows pickling the results of a function

    Pickle is python's built-in object serialization. This decorator, when
    used on a function, saves the results of the computation in the function
    to a pickle file. If the function is called a second time with the
    same inputs, then the computation will not be repeated and the previous
    results will be used.

    This functionality is useful for computations which take a long time,
    but will need to be repeated (such as the first step of a data analysis).

    Parameters
    ----------
    filename : string (optional)
        pickle file to which results will be saved.
        If not specified, then the file is '<funcname>_output.pkl'
        where '<funcname>' is replaced by the name of the decorated function.
    verbose : boolean (optional)
        if True, then print a message to standard out specifying when the
        pickle file is written or read.

    Examples
    --------
    >>> @pickle_results('tmp.pkl', verbose=True)
    ... def f(x):
    ...     return x * x
    >>> f(4)
    @pickle_results: computing results and saving to 'tmp.pkl'
    16
    >>> f(4)
    @pickle_results: using precomputed results from 'tmp.pkl'
    16
    >>> f(6)
    @pickle_results: computing results and saving to 'tmp.pkl'
    36
    >>> import os; os.remove('tmp.pkl')
    """
    def pickle_func(f, filename=filename, verbose=verbose):
        if filename is None:
            filename = '%s_output.pkl' % f.__name__

        def new_f(*args, **kwargs):
            try:
                # Use a context manager so the cache file handle is closed
                # even when unpickling fails (the original leaked it).
                with open(filename, 'rb') as cache_file:
                    D = pickle.load(cache_file)
                cache_exists = True
            except Exception:
                # Missing/corrupt cache: fall back to an empty cache dict.
                D = {}
                cache_exists = False

            # simple comparison doesn't work in the case of numpy arrays
            Dargs = D.get('args')
            Dkwargs = D.get('kwargs')

            try:
                args_match = (args == Dargs)
            except Exception:
                # Element-wise comparison for numpy arrays (== is ambiguous).
                args_match = np.all([np.all(a1 == a2)
                                     for (a1, a2) in zip(Dargs, args)])

            try:
                kwargs_match = (kwargs == Dkwargs)
            except Exception:
                kwargs_match = ((sorted(Dkwargs.keys())
                                 == sorted(kwargs.keys()))
                                and (np.all([np.all(Dkwargs[key]
                                                    == kwargs[key])
                                             for key in kwargs])))

            if (type(D) == dict and D.get('funcname') == f.__name__
                    and args_match and kwargs_match):
                if verbose:
                    print("@pickle_results: using precomputed "
                          "results from '%s'" % filename)
                retval = D['retval']

            else:
                if verbose:
                    print("@pickle_results: computing results "
                          "and saving to '%s'" % filename)
                    if cache_exists:
                        print(" warning: cache file '%s' exists" % filename)
                        print(" - args match: %s" % args_match)
                        print(" - kwargs match: %s" % kwargs_match)
                retval = f(*args, **kwargs)

                funcdict = dict(funcname=f.__name__, retval=retval,
                                args=args, kwargs=kwargs)
                with open(filename, 'wb') as outfile:
                    pickle.dump(funcdict, outfile)

            return retval
        return new_f
    return pickle_func
| bsd-2-clause |
corakwue/ftrace | ftrace/parsers/__init__.py | 1 | 5349 | #!/usr/bin/env python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from .register import PARSERS
# CPU
from .sched_switch import sched_switch
from .sched_wakeup import sched_wakeup
from .sched_migrate_task import sched_migrate_task
from .cpufreq_interactive_setspeed import cpufreq_interactive_setspeed
from .cpufreq_interactive_target import cpufreq_interactive_target
from .cpufreq_interactive_already import cpufreq_interactive_already
# GPU
from .gpu_sched_switch import gpu_sched_switch
from .kgsl_pwr_set_state import kgsl_pwr_set_state
from .kgsl_gpubusy import kgsl_gpubusy
from .kgsl_buslevel import kgsl_buslevel
from .kgsl_pwrlevel import kgsl_pwrlevel
from .kgsl_rail import kgsl_rail
from .kgsl_bus import kgsl_bus
from .kgsl_irq import kgsl_irq
from .kgsl_clk import kgsl_clk
from .mali_job_slots_event import mali_job_slots_event
from .mali_pm_status import mali_pm_status
from .mali_pm_power_on import mali_pm_power_on
from .mali_pm_power_off import mali_pm_power_off
# Bus
from .memory_bus_usage import memory_bus_usage
from .bus_update_request import bus_update_request #msm
# Android
from .tracing_mark_write import tracing_mark_write
# Work Queue
from .workqueue_execute_start import workqueue_execute_start
from .workqueue_execute_end import workqueue_execute_end
from .workqueue_queue_work import workqueue_queue_work
from .workqueue_activate_work import workqueue_activate_work
# Disk
from .block_rq_issue import block_rq_issue
from .block_rq_complete import block_rq_complete
from .block_rq_insert import block_rq_insert
from .ext4_da_write_begin import ext4_da_write_begin
from .ext4_da_write_end import ext4_da_write_end
from .ext4_sync_file_enter import ext4_sync_file_enter
from .ext4_sync_file_exit import ext4_sync_file_exit
from .f2fs_sync_file_enter import f2fs_sync_file_enter
from .f2fs_sync_file_exit import f2fs_sync_file_exit
from .f2fs_write_begin import f2fs_write_begin
from .f2fs_write_end import f2fs_write_end
# Power/Clock
from .cluster_enter import cluster_enter
from .cluster_exit import cluster_exit
from .cpu_idle_enter import cpu_idle_enter
from .cpu_idle_exit import cpu_idle_exit
from .cpu_frequency import cpu_frequency
from .cpu_frequency_switch_start import cpu_frequency_switch_start
from .cpu_frequency_switch_end import cpu_frequency_switch_end
from .cpu_idle import cpu_idle
from .clock_set_rate import clock_set_rate
from .clock_enable import clock_enable
from .clock_disable import clock_disable
# Thermal [MSM]
from .tsens_threshold_clear import tsens_threshold_clear
from .tsens_threshold_hit import tsens_threshold_hit
from .tsens_read import tsens_read
# IRQ
from .irq_handler_entry import irq_handler_entry
from .irq_handler_exit import irq_handler_exit
from .softirq_raise import softirq_raise
from .softirq_entry import softirq_entry
from .softirq_exit import softirq_exit
# SYNC
from .sync_pt import sync_pt
from .sync_timeline import sync_timeline
from .sync_wait import sync_wait
# Qualcomm's HMP
from .sched_task_load import sched_task_load
# Linaro/ARM's HMP
from .sched_hmp_migrate import sched_hmp_migrate
from .sched_rq_nr_running import sched_rq_nr_running
from .sched_rq_runnable_load import sched_rq_runnable_load
from .sched_rq_runnable_ratio import sched_rq_runnable_ratio
from .sched_task_load_contrib import sched_task_load_contrib
from .sched_task_runnable_ratio import sched_task_runnable_ratio
from .sched_task_usage_ratio import sched_task_usage_ratio
# Linaro/ARM's EAS work
from .cpu_capacity import cpu_capacity
from .sched_boost_cpu import sched_boost_cpu
from .sched_contrib_scale_f import sched_contrib_scale_f
from .sched_load_avg_task import sched_load_avg_task
from .sched_load_avg_cpu import sched_load_avg_cpu
# Android binder
from .binder_ioctl import binder_ioctl
from .binder_return import binder_return
from .binder_lock import binder_lock
from .binder_unlock import binder_unlock
from .binder_locked import binder_locked
from .binder_command import binder_command
from .binder_wait_for_work import binder_wait_for_work
from .binder_transaction_buffer_release import binder_transaction_buffer_release
from .binder_transaction import binder_transaction
from .binder_transaction_alloc_buf import binder_transaction_alloc_buf
from .binder_write_done import binder_write_done
from .binder_read_done import binder_read_done
from .binder_ioctl_done import binder_ioctl_done
from .binder_transaction_received import binder_transaction_received
from .binder_transaction_ref_to_node import binder_transaction_ref_to_node
from .binder_transaction_node_to_ref import binder_transaction_node_to_ref
from .binder_transaction_fd import binder_transaction_fd
from .binder_transaction_ref_to_ref import binder_transaction_ref_to_ref
from .binder_update_page_range import binder_update_page_range
| apache-2.0 |
simbuerg/benchbuild | benchbuild/container.py | 1 | 18934 | from plumbum import cli, local, TF, FG, ProcessExecutionError
from benchbuild.utils.cmd import tar, mkdir, mv, rm, bash, cp
from benchbuild.settings import CFG, update_env
from benchbuild.utils import log
from benchbuild.utils.bootstrap import find_package, install_uchroot
from benchbuild.utils.path import mkfile_uchroot, mkdir_uchroot
from benchbuild.utils.path import list_to_path
from benchbuild.utils.container import Gentoo
from benchbuild.utils.run import (run, uchroot, uchroot_with_mounts,
uchroot_no_args, uchroot_env,
uchroot_mounts)
from benchbuild.utils.downloader import Copy, update_hash
from benchbuild.utils.user_interface import ask
from abc import abstractmethod
import logging
import sys
import os
def clean_directories(builddir, in_dir=True, out_dir=True):
    """Remove the in and out of the container if confirmed by the user."""
    candidates = (("container-in", in_dir), ("container-out", out_dir))
    with local.cwd(builddir):
        for dirname, requested in candidates:
            if not requested or not os.path.exists(dirname):
                continue
            if ask("Should I delete '{0}'?".format(os.path.abspath(dirname))):
                rm("-rf", dirname)
def setup_directories(builddir):
    """Create the in and out directories of the container."""
    with local.cwd(builddir):
        # Idempotent: only create what is missing.
        for dirname in ("container-in", "container-out"):
            if not os.path.exists(dirname):
                mkdir("-p", dirname)
def setup_container(builddir, container):
    """Prepare the container and returns the path where it can be found.

    Copies the container archive into <builddir>/container-in and unpacks
    it there, either through uchroot (for plain archives) or directly with
    tar (for archives that already ship erlent support).

    Args:
        builddir: Build directory that receives the 'container-in' tree.
        container: Path to the container archive to unpack.

    Returns:
        Absolute-ish path <builddir>/container-in of the unpacked tree.
    """
    with local.cwd(builddir):
        container_filename = str(container).split(os.path.sep)[-1]
        container_in = os.path.join("container-in", container_filename)
        Copy(container, container_in)
        # NOTE: this local 'uchroot' shadows the imported uchroot() helper.
        uchroot = uchroot_no_args()
        with local.cwd("container-in"):
            # Run as root (uid/gid 0) with container-in as the chroot cwd.
            uchroot = uchroot["-E", "-A", "-u", "0", "-g", "0", "-C", "-r",
                              "/", "-w", os.path.abspath("."), "--"]
        # Check, if we need erlent support for this archive.
        has_erlent = bash[
            "-c", "tar --list -f './{0}' | grep --silent '.erlent'".format(
                container_in)]
        has_erlent = (has_erlent & TF)
        # Unpack input container to: container-in
        if not has_erlent:
            # No erlent inside: unpack through uchroot so ownership is right.
            cmd = local["/bin/tar"]["xf"]
            cmd = uchroot[cmd[container_filename]]
        else:
            cmd = tar["xf"]
            cmd = cmd[os.path.abspath(container_in)]
        with local.cwd("container-in"):
            # Device nodes cannot be created without privileges; skip them.
            cmd("--exclude=dev/*")
        rm(container_in)
    return os.path.join(builddir, "container-in")
def run_in_container(command, container_dir, mounts):
    """
    Run a given command inside a container.
    Mounts a directory as a container at the given mountpoint and tries to run
    the given command inside the new container.

    Args:
        command: Argument list; command[0] must exist inside the container.
        container_dir: Root directory of the unpacked container.
        mounts: Configured mount points (picked up by uchroot_with_mounts).

    Returns:
        The result of running the command in the foreground, or None when
        the command does not exist inside the container.
    """
    with local.cwd(container_dir):
        # NOTE: this local 'uchroot' shadows the imported uchroot() helper.
        uchroot = uchroot_with_mounts()
        # Run as root (uid/gid 0) with the container as root filesystem.
        uchroot = uchroot["-E", "-A", "-u", "0", "-g", "0", "-C", "-w",
                          "/", "-r", os.path.abspath(container_dir)]
        uchroot = uchroot["--"]
        # Verify the binary exists inside the container before launching.
        cmd_path = os.path.join(container_dir, command[0].lstrip('/'))
        if not os.path.exists(cmd_path):
            logging.error(
                "The command does not exist inside the container! {0}".format(
                    cmd_path))
            return
        cmd = uchroot[command]
        # Run attached to the terminal (foreground).
        return cmd & FG
def pack_container(in_container, out_file):
    """Pack a container tree into a bzip2 tarball and register it.

    Creates <container-out>/<name> from the contents of in_container,
    stores a hash file next to it, moves both to out_file, and appends
    the new container to CFG["container"]["known"].

    Args:
        in_container: Directory holding the container filesystem.
        out_file: Final path of the packed container image.
    """
    container_filename = os.path.split(out_file)[-1]
    out_container = os.path.join("container-out", container_filename)
    out_container = os.path.abspath(out_container)
    out_tmp_filename = os.path.basename(out_container)
    out_dir = os.path.dirname(out_container)
    # Pack the results to: container-out
    with local.cwd(in_container):
        tar("cjf", out_container, ".")
    c_hash = update_hash(out_tmp_filename, out_dir)
    # NOTE(review): out_dir is created only *after* tar and update_hash
    # already wrote into it — presumably setup_directories() guarantees it
    # exists beforehand; confirm, otherwise this check is dead code.
    if not os.path.exists(out_dir):
        mkdir("-p", out_dir)
    mv(out_container, out_file)
    mv(out_container + ".hash", out_file + ".hash")
    new_container = {"path": out_file, "hash": str(c_hash)}
    CFG["container"]["known"].value().append(new_container)
def setup_bash_in_container(builddir, container, outfile, mounts, shell):
    """
    Setup a bash environment inside a container.

    Creates a new chroot, which the user can use as a bash to run the wanted
    projects inside the mounted container. If the shell exits cleanly, the
    modified container is packed to *outfile* and the config is persisted.

    Args:
        builddir: Working directory for the container operations.
        container: Directory of the unpacked container.
        outfile: Path where the repacked container image is stored.
        mounts: Mount configuration forwarded to run_in_container.
        shell: Shell command (argument list) to run inside the container.
    """
    with local.cwd(builddir):
        # Switch to bash inside uchroot
        # FIX: the original message was missing a space between
        # "non-zero" and "exit code".
        print("Entering bash inside User-Chroot. Prepare your image and "
              "type 'exit' when you are done. If bash exits with a non-zero "
              "exit code, no new container will be stored.")
        store_new_container = True
        try:
            run_in_container(shell, container, mounts)
        except ProcessExecutionError:
            # Non-zero exit: the user aborted; do not pack a new image.
            store_new_container = False
        if store_new_container:  # pylint: disable=W0104
            print("Packing new container image.")
            pack_container(container, outfile)
            config_path = CFG["config_file"].value()
            CFG.store(config_path)
            print("Storing config in {0}".format(os.path.abspath(config_path)))
def find_hash(container_db, key):
    """Return the path of the first container whose hash starts with *key*.

    Args:
        container_db: Iterable of ``{"hash": ..., "path": ...}`` entries.
        key: Hash prefix to search for.

    Returns:
        The matching entry's ``"path"`` value, or None when nothing matches.
    """
    matches = (entry["path"] for entry in container_db
               if entry["hash"].startswith(key))
    return next(matches, None)
def set_input_container(container, cfg):
    """Record *container* as the input container if it exists on disk.

    Args:
        container: Candidate path to the input container (may be falsy).
        cfg: Configuration mapping with a ``["container"]["input"]`` slot.

    Returns:
        True when the path exists and was stored, False otherwise.
    """
    usable = bool(container) and os.path.exists(container)
    if usable:
        cfg["container"]["input"] = container
    return usable
class MockObj(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class ContainerStrategy(object):
    """Interfaces for the different containers chosen by the experiment.

    NOTE(review): @abstractmethod has no effect here because the class does
    not use ABCMeta as its metaclass — instantiating this base or a subclass
    without run() will not raise. Confirm whether that is intended.
    """
    @abstractmethod
    def run(self, context):
        # Subclasses implement the actual container setup for the experiment.
        pass
class BashStrategy(ContainerStrategy):
    """The user interface for setting up a bash inside the container."""
    def run(self, context):
        """Drop the user into an interactive shell inside the container.

        *context* carries builddir, in_container, out_container, mounts and
        shell (see the MockObj built by ContainerCreate.main).
        """
        print("Entering a shell in the container.\nUse the exit "
              "command to leave the container.")
        setup_bash_in_container(context.builddir, context.in_container,
                                context.out_container, context.mounts,
                                context.shell)
class SetupPolyJITGentooStrategy(ContainerStrategy):
    """Interface of using gentoo as a container for an experiment."""
    def write_wgetrc(self, path):
        """Write a wgetrc at *path* that routes wget through the configured
        gentoo http/ftp proxies (no-op for each proxy that is unset)."""
        mkfile_uchroot("/etc/wgetrc")
        with open(path, 'w') as wgetrc:
            hp = CFG["gentoo"]["http_proxy"].value()
            fp = CFG["gentoo"]["ftp_proxy"].value()
            if hp is not None:
                http_s = "http_proxy = {0}".format(str(hp))
                https_s = "https_proxy = {0}".format(str(hp))
                wgetrc.write("use_proxy = on\n")
                wgetrc.write(http_s + "\n")
                wgetrc.write(https_s + "\n")
            if fp is not None:
                fp_s = "ftp_proxy={0}".format(str(fp))
                wgetrc.write(fp_s + "\n")
    def write_makeconfig(self, path):
        """Write the portage make.conf used inside the container, including
        any configured http/ftp/rsync proxies."""
        mkfile_uchroot("/etc/portage/make.conf")
        with open(path, 'w') as makeconf:
            lines = '''
PORTAGE_USERNAME=root
PORTAGE_GROUPNAME=root
CFLAGS="-O2 -pipe"
CXXFLAGS="${CFLAGS}"
FEATURES="-xattr"
CHOST="x86_64-pc-linux-gnu"
USE="bindist mmx sse sse2"
PORTDIR="/usr/portage"
DISTDIR="${PORTDIR}/distfiles"
PKGDIR="${PORTDIR}/packages"
'''
            makeconf.write(lines)
            hp = CFG["gentoo"]["http_proxy"].value()
            if hp is not None:
                http_s = "http_proxy={0}".format(str(hp))
                https_s = "https_proxy={0}".format(str(hp))
                makeconf.write(http_s + "\n")
                makeconf.write(https_s + "\n")
            fp = CFG["gentoo"]["ftp_proxy"].value()
            if fp is not None:
                fp_s = "ftp_proxy={0}".format(str(fp))
                makeconf.write(fp_s + "\n")
            rp = CFG["gentoo"]["rsync_proxy"].value()
            if rp is not None:
                rp_s = "RSYNC_PROXY={0}".format(str(rp))
                makeconf.write(rp_s + "\n")
    def write_bashrc(self, path):
        """Write a portage bashrc exporting the benchbuild PATH and
        LD_LIBRARY_PATH derived from the configured container mounts."""
        mkfile_uchroot("/etc/portage/bashrc")
        paths, libs = uchroot_env(
            uchroot_mounts("mnt", CFG["container"]["mounts"].value()))
        with open(path, 'w') as bashrc:
            lines = '''
export PATH="{0}:${{PATH}}"
export LD_LIBRARY_PATH="{1}:${{LD_LIBRARY_PATH}}"
'''.format(list_to_path(paths), list_to_path(libs))
            bashrc.write(lines)
    def write_layout(self, path):
        """Write the portage repository layout.conf at *path*."""
        mkdir_uchroot("/etc/portage/metadata")
        mkfile_uchroot("/etc/portage/metadata/layout.conf")
        with open(path, 'w') as layoutconf:
            lines = '''masters = gentoo'''
            layoutconf.write(lines)
    def configure(self):
        """Configure the gentoo container for a PolyJIT experiment."""
        # Paths are relative: the caller runs this with cwd inside the
        # container tree (see run() below).
        self.write_bashrc("etc/portage/bashrc")
        self.write_makeconfig("etc/portage/make.conf")
        self.write_wgetrc("etc/wgetrc")
        self.write_layout("etc/portage/metadata/layout.conf")
        # Copy the host resolver config so the container has DNS.
        mkfile_uchroot("/etc/resolv.conf")
        cp("/etc/resolv.conf", "etc/resolv.conf")
        config_file = CFG["config_file"].value()
        if os.path.exists(str(config_file)):
            paths, libs = \
                uchroot_env(
                    uchroot_mounts("mnt",
                                   CFG["container"]["mounts"].value()))
            # NOTE(review): this aliases CFG rather than copying it, so the
            # global configuration is mutated here — confirm intended.
            uchroot_cfg = CFG
            uchroot_cfg["env"]["compiler_path"] = paths
            uchroot_cfg["env"]["compiler_ld_library_path"] = libs
            uchroot_cfg["env"]["binary_path"] = paths
            uchroot_cfg["env"]["binary_ld_library_path"] = libs
            uchroot_cfg["env"]["lookup_path"] = paths
            uchroot_cfg["env"]["lookup_ld_library_path"] = libs
            mkfile_uchroot("/.benchbuild.json")
            uchroot_cfg.store(".benchbuild.json")
    def run(self, context):
        """Setup a gentoo container suitable for PolyJIT."""
        # Don't do something when running non-interactive.
        if not sys.stdout.isatty():
            return
        with local.cwd(context.in_container):
            self.configure()
            sed_in_chroot = uchroot()["/bin/sed"]
            emerge_in_chroot = uchroot()["/usr/bin/emerge"]
            has_pkg = uchroot()["/usr/bin/qlist", "-I"]
            # Strip any pinned compilers from make.conf; the env below wins.
            run(sed_in_chroot["-i", '/CC=/d', "/etc/portage/make.conf"])
            run(sed_in_chroot["-i", '/CXX=/d', "/etc/portage/make.conf"])
            packages = \
                CFG["container"]["strategy"]["polyjit"]["packages"].value()
            with local.env(CC="gcc", CXX="g++",
                           MAKEOPTS="-j{0}".format(CFG["jobs"].value())):
                if CFG["container"]["strategy"]["polyjit"]["sync"].value():
                    run(emerge_in_chroot["--sync"])
                if CFG["container"]["strategy"]["polyjit"]["upgrade"].value():
                    run(emerge_in_chroot["--autounmask-only=y", "-uUDN",
                                         "--with-bdeps=y", "@world"])
                run(emerge_in_chroot["-uUDN", "--with-bdeps=y", "@world"])
                for pkg in packages:
                    # Skip packages that are already installed.
                    if (has_pkg[pkg["name"]] & TF):
                        continue
                    env = pkg["env"]
                    with local.env(**env):
                        run(emerge_in_chroot[pkg["name"]])
        print("Packing new container image.")
        with local.cwd(context.builddir):
            pack_container(context.in_container, context.out_container)
class Container(cli.Application):
    """Manage uchroot containers."""
    VERSION = CFG["version"].value()
    def __init__(self, exe):
        super(Container, self).__init__(exe)
    @cli.switch(["-i", "--input-file"], str, help="Input container path")
    def input_file(self, container):
        """Find the input path of a uchroot container.

        Accepts either an existing path or a hash prefix of a known
        container; raises ValueError when neither resolves.
        """
        p = os.path.abspath(container)
        if set_input_container(p, CFG):
            return
        # Not a path — try to resolve it as a hash prefix.
        p = find_hash(CFG["container"]["known"].value(), container)
        if set_input_container(p, CFG):
            return
        raise ValueError("The path '{0}' does not exist.".format(p))
    @cli.switch(["-o", "--output-file"], str, help="Output container path")
    def output_file(self, container):
        """Find and writes the output path of a chroot container."""
        p = os.path.abspath(container)
        if os.path.exists(p):
            # Existing output requires explicit user confirmation.
            if not ask("Path '{0}' already exists." " Overwrite?".format(p)):
                sys.exit(0)
        CFG["container"]["output"] = p
    @cli.switch(["-s", "--shell"],
                str,
                help="The shell command we invoke inside the container.")
    def shell(self, custom_shell):
        """The command to run inside the container."""
        CFG["container"]["shell"] = custom_shell
    # NOTE(review): "-tmp-dir" uses a single dash — presumably "--tmp-dir"
    # was intended; confirm how plumbum normalizes this.
    @cli.switch(["-t", "-tmp-dir"],
                cli.ExistingDirectory,
                help="Temporary directory")
    def builddir(self, tmpdir):
        """Set the current builddir of the container."""
        CFG["build_dir"] = tmpdir
    # NOTE(review): "m" has no leading dash, unlike every other switch name
    # here — confirm plumbum treats it as "-m".
    @cli.switch(
        ["m", "--mount"],
        cli.ExistingDirectory,
        list=True,
        help="Mount the given directory under / inside the uchroot container")
    def mounts(self, user_mount):
        """Save the current mount of the container into the settings."""
        CFG["container"]["mounts"] = user_mount
    verbosity = cli.CountOf('-v', help="Enable verbose output")
    def main(self, *args):
        log.configure()
        _log = logging.getLogger()
        # Map -v count to log level; NOTE(review): more than three -v flags
        # would raise KeyError here — confirm plumbum caps the count.
        _log.setLevel({
            3: logging.DEBUG,
            2: logging.INFO,
            1: logging.WARNING,
            0: logging.ERROR
        }[self.verbosity])
        update_env()
        builddir = os.path.abspath(str(CFG["build_dir"].value()))
        if not os.path.exists(builddir):
            response = ask("The build directory {dirname} does not exist yet. "
                           "Should I create it?".format(dirname=builddir))
            if response:
                mkdir("-p", builddir)
                print("Created directory {0}.".format(builddir))
        setup_directories(builddir)
@Container.subcommand("run")
class ContainerRun(cli.Application):
    """Execute commands inside a prebuilt container."""
    def main(self, *args):
        """Unpack the configured input container (or a default Gentoo one),
        run *args* inside it, and clean up the scratch directories."""
        builddir = CFG["build_dir"].value()
        in_container = CFG["container"]["input"].value()
        mounts = CFG["container"]["mounts"].value()
        # Fall back to the stock Gentoo container if no usable input is set.
        if (in_container is None) or not os.path.exists(in_container):
            in_is_file = False
            in_container = Gentoo().local
        else:
            in_is_file = os.path.isfile(in_container)
        # Archives need to be unpacked first; directories are used as-is.
        if in_is_file:
            clean_directories(builddir)
            setup_directories(builddir)
            in_container = setup_container(builddir, in_container)
        run_in_container(args, in_container, mounts)
        # Only remove container-in (and only if we unpacked it ourselves).
        clean_directories(builddir, in_is_file, False)
@Container.subcommand("create")
class ContainerCreate(cli.Application):
    """
    Create a new container with a predefined strategy.

    We offer a variety of creation policies for a new container. By default a
    basic 'spawn a bash' policy is used. This just leaves you inside a bash
    that is started in the extracted container. After customization you can
    exit the bash and pack up the result.
    """
    # Default policy: interactive bash inside the container.
    _strategy = BashStrategy()
    @cli.switch(["-S", "--strategy"],
                cli.Set("bash", "polyjit", case_sensitive=False),
                help="Defines the strategy used to create a new container.",
                mandatory=False)
    def strategy(self, strategy):
        """Select the creation strategy by name ('bash' or 'polyjit')."""
        self._strategy = {
            "bash": BashStrategy(),
            "polyjit": SetupPolyJITGentooStrategy()
        }[strategy]
    def main(self, *args):
        """Unpack the input container, run the chosen strategy on it, and
        clean the scratch directories afterwards."""
        builddir = CFG["build_dir"].value()
        in_container = CFG["container"]["input"].value()
        out_container = CFG["container"]["output"].value()
        mounts = CFG["container"]["mounts"].value()
        shell = CFG["container"]["shell"].value()
        # Fall back to the stock Gentoo container if no usable input is set.
        if (in_container is None) or not os.path.exists(in_container):
            in_container = Gentoo().local
        in_is_file = os.path.isfile(in_container)
        if in_is_file:
            in_container = setup_container(builddir, in_container)
        # The strategy only needs an attribute bag, not a full context class.
        self._strategy.run(MockObj(builddir=builddir,
                                   in_container=in_container,
                                   out_container=out_container,
                                   mounts=mounts,
                                   shell=shell))
        clean_directories(builddir, in_is_file, True)
@Container.subcommand("bootstrap")
class ContainerBootstrap(cli.Application):
    """Check for the needed files."""
    def install_cmake_and_exit(self):
        """Tell the user to install cmake and aborts the current process."""
        print("You need to install cmake via your package manager manually."
              " Exiting.")
        sys.exit(-1)
    def main(self, *args):
        """Install uchroot if missing and persist a local config file."""
        print("Checking container binary dependencies...")
        if not find_package("uchroot"):
            # Building uchroot from source requires cmake.
            if not find_package("cmake"):
                self.install_cmake_and_exit()
            install_uchroot()
        print("...OK")
        config_file = CFG["config_file"].value()
        if not (config_file and os.path.exists(config_file)):
            config_file = ".benchbuild.json"
        CFG.store(config_file)
        print("Storing config in {0}".format(os.path.abspath(config_file)))
        print(
            "Future container commands from this directory will automatically"
            " source the config file.")
@Container.subcommand("list")
class ContainerList(cli.Application):
    """Prints a list of the known containers."""
    def main(self, *args):
        containers = CFG["container"]["known"].value()
        for c in containers:
            # Show the first 8 characters of the hash next to the path.
            print("[{1:.8s}] {0}".format(c["path"], str(c["hash"])))
def main(*args):
    """Main entry point: run the Container CLI application."""
    return Container.run(*args)
| mit |
jaharkes/home-assistant | homeassistant/components/fan/__init__.py | 10 | 7305 | """
Provides functionality to interact with fans.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/fan/
"""
import logging
import os
import voluptuous as vol
from homeassistant.components import group
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (SERVICE_TURN_ON, SERVICE_TOGGLE,
SERVICE_TURN_OFF, ATTR_ENTITY_ID,
STATE_UNKNOWN)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
DOMAIN = 'fan'
SCAN_INTERVAL = 30
GROUP_NAME_ALL_FANS = 'all fans'
ENTITY_ID_ALL_FANS = group.ENTITY_ID_FORMAT.format(GROUP_NAME_ALL_FANS)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Bitfield of features supported by the fan entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
SUPPORT_SET_SPEED = 1
SUPPORT_OSCILLATE = 2
SERVICE_SET_SPEED = 'set_speed'
SERVICE_OSCILLATE = 'oscillate'
SPEED_OFF = 'off'
SPEED_LOW = 'low'
SPEED_MED = 'med'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
ATTR_SPEED = 'speed'
ATTR_SPEED_LIST = 'speed_list'
ATTR_OSCILLATING = 'oscillating'
PROP_TO_ATTR = {
'speed': ATTR_SPEED,
'speed_list': ATTR_SPEED_LIST,
'oscillating': ATTR_OSCILLATING,
'supported_features': ATTR_SUPPORTED_FEATURES,
} # type: dict
FAN_SET_SPEED_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_SPEED): cv.string
}) # type: dict
FAN_TURN_ON_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_SPEED): cv.string
}) # type: dict
FAN_TURN_OFF_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids
}) # type: dict
FAN_OSCILLATE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_OSCILLATING): cv.boolean
}) # type: dict
FAN_TOGGLE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids
})
_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id: str=None) -> bool:
    """Return if the fans are on based on the statemachine."""
    state = hass.states.get(entity_id or ENTITY_ID_ALL_FANS)
    return state.attributes[ATTR_SPEED] not in [SPEED_OFF, STATE_UNKNOWN]
def turn_on(hass, entity_id: str=None, speed: str=None) -> None:
    """Turn all or specified fan on."""
    # Only include parameters that were actually provided.
    data = {}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    if speed is not None:
        data[ATTR_SPEED] = speed
    hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
def turn_off(hass, entity_id: str=None) -> None:
    """Turn all or specified fan off."""
    # Unlike turn_on, the entity id is forwarded even when it is None.
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
def toggle(hass, entity_id: str=None) -> None:
    """Toggle all or specified fans."""
    # The entity id is forwarded even when it is None (meaning: all fans).
    hass.services.call(DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id})
def oscillate(hass, entity_id: str=None, should_oscillate: bool=True) -> None:
    """Set oscillation on all or specified fan."""
    # Only include parameters that are not None.
    data = {}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    if should_oscillate is not None:
        data[ATTR_OSCILLATING] = should_oscillate
    hass.services.call(DOMAIN, SERVICE_OSCILLATE, data)
def set_speed(hass, entity_id: str=None, speed: str=None) -> None:
    """Set speed for all or specified fan."""
    # Only include parameters that were actually provided.
    data = {}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    if speed is not None:
        data[ATTR_SPEED] = speed
    hass.services.call(DOMAIN, SERVICE_SET_SPEED, data)
def setup(hass, config: dict) -> bool:
    """Expose fan control via statemachine and services.

    Sets up all configured fan platforms and registers the
    turn_on/turn_off/set_speed/oscillate services for the fan domain.

    Returns:
        True, to signal the component was set up.
        (FIX: the original annotation said ``-> None`` although the
        function returns True.)
    """
    component = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_FANS)
    component.setup(config)

    # FIX: the handler receives a ServiceCall object, not a str; the old
    # ``service: str`` annotation was wrong.
    def handle_fan_service(service) -> None:
        """Handle a service call for fans by dispatching to the entity
        method of the same name."""
        # Get the validated data
        params = service.data.copy()
        # Convert the entity ids to valid fan ids
        target_fans = component.extract_from_service(service)
        params.pop(ATTR_ENTITY_ID, None)
        # The service name doubles as the FanEntity method name.
        service_fun = None
        for service_def in [SERVICE_TURN_ON, SERVICE_TURN_OFF,
                            SERVICE_SET_SPEED, SERVICE_OSCILLATE]:
            if service_def == service.service:
                service_fun = service_def
                break
        if service_fun:
            for fan in target_fans:
                getattr(fan, service_fun)(**params)
            # Refresh entities that rely on polling.
            for fan in target_fans:
                if fan.should_poll:
                    fan.update_ha_state(True)
            return

    # Listen for fan service calls.
    # NOTE(review): SERVICE_TOGGLE has a schema defined above but is never
    # registered here — confirm whether that is intentional.
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    hass.services.register(DOMAIN, SERVICE_TURN_ON, handle_fan_service,
                           descriptions.get(SERVICE_TURN_ON),
                           schema=FAN_TURN_ON_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_TURN_OFF, handle_fan_service,
                           descriptions.get(SERVICE_TURN_OFF),
                           schema=FAN_TURN_OFF_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_SET_SPEED, handle_fan_service,
                           descriptions.get(SERVICE_SET_SPEED),
                           schema=FAN_SET_SPEED_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_OSCILLATE, handle_fan_service,
                           descriptions.get(SERVICE_OSCILLATE),
                           schema=FAN_OSCILLATE_SCHEMA)
    return True
class FanEntity(ToggleEntity):
    """Representation of a fan.

    Base class for fan platforms; subclasses override the methods matching
    the features they support (see SUPPORT_SET_SPEED / SUPPORT_OSCILLATE).
    """
    # pylint: disable=no-self-use
    def set_speed(self: ToggleEntity, speed: str) -> None:
        """Set the speed of the fan."""
        pass
    def turn_on(self: ToggleEntity, speed: str=None, **kwargs) -> None:
        """Turn on the fan."""
        raise NotImplementedError()
    def turn_off(self: ToggleEntity, **kwargs) -> None:
        """Turn off the fan."""
        raise NotImplementedError()
    def oscillate(self: ToggleEntity, oscillating: bool) -> None:
        """Oscillate the fan."""
        pass
    @property
    def is_on(self):
        """Return true if the entity is on."""
        # A fan is 'on' whenever it reports a speed other than off/unknown.
        return self.state_attributes.get(ATTR_SPEED, STATE_UNKNOWN) \
            not in [SPEED_OFF, STATE_UNKNOWN]
    @property
    def speed_list(self: ToggleEntity) -> list:
        """Get the list of available speeds."""
        return []
    @property
    def state_attributes(self: ToggleEntity) -> dict:
        """Return optional state attributes."""
        # Export only the PROP_TO_ATTR properties the subclass defines
        # and that currently hold a value.
        data = {}  # type: dict
        for prop, attr in PROP_TO_ATTR.items():
            if not hasattr(self, prop):
                continue
            value = getattr(self, prop)
            if value is not None:
                data[attr] = value
        return data
    @property
    def supported_features(self: ToggleEntity) -> int:
        """Flag supported features."""
        return 0
| mit |
aptivate/sarpaminfohub | django/sarpaminfohub/infohub/tests/utils_tests.py | 1 | 1929 | # -*- coding: iso-8859-15 -*-
from sarpaminfohub.infohub.tests.sarpam_test_case import SarpamTestCase
import sarpaminfohub.infohub.utils as utils
class UtilsTest(SarpamTestCase):
    """Unit tests for the median helpers in sarpaminfohub.infohub.utils."""
    def test_none_returned_for_median_of_empty_list(self):
        empty_list = []
        median = utils.get_median(empty_list)
        self.assertEquals(None, median)
    def test_middle_value_returned_for_median_of_list_with_odd_length(self):
        price_list = [0.09, 0.05, 0.14]
        median = utils.get_median(price_list)
        # Middle element of the sorted list [0.05, 0.09, 0.14].
        self.assertAlmostEquals(0.09, median)
    def test_average_of_middle_values_returned_for_median_of_list_with_even_length(self):
        price_list = [0.04, 0.05, 0.14, 0.07]
        median = utils.get_median(price_list)
        # Mean of the two middle elements (0.05 and 0.07).
        self.assertAlmostEquals(0.06, median)
    def test_none_values_ignored_when_calculating_median_fob_price_of_list(self):
        price_list = [{'fob_price':None, 'landed_price':None},
                      {'fob_price':0.09, 'landed_price':None},
                      {'fob_price':None, 'landed_price':None},
                      {'fob_price':0.05, 'landed_price':None},
                      {'fob_price':None, 'landed_price':None},
                      {'fob_price':0.14, 'landed_price':None}]
        median = utils.get_median_prices(price_list)
        # get_median_prices returns (median_fob, median_landed).
        self.assertAlmostEquals(0.09, median[0])
    def test_none_values_ignored_when_calculating_median_landed_price_of_list(self):
        price_list = [{'landed_price':None, 'fob_price':None},
                      {'landed_price':0.09, 'fob_price':None},
                      {'landed_price':None, 'fob_price':None},
                      {'landed_price':0.05, 'fob_price':None},
                      {'landed_price':None, 'fob_price':None},
                      {'landed_price':0.14, 'fob_price':None}]
        median = utils.get_median_prices(price_list)
        self.assertAlmostEquals(0.09, median[1])
| gpl-3.0 |
tlatzko/spmcluster | .tox/2.6-cover/lib/python2.6/site-packages/pip/_vendor/requests/compat.py | 1039 | 1469 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| bsd-2-clause |
Pierre-Thibault/neo-insert-imports | test/result_dir/blank_lines_only.py | 1 | 6254 |
# Static analyzer import helpers: (STATIC_IMPORT_MARK)
if 0:
import gluon
global cache; cache = gluon.cache.Cache()
global LOAD; LOAD = gluon.compileapp.LoadFactory()
import gluon.compileapp.local_import_aux as local_import #@UnusedImport
from gluon.contrib.gql import GQLDB #@UnusedImport
from gluon.dal import Field #@UnusedImport
global request; request = gluon.globals.Request()
global response; response = gluon.globals.Response()
global session; session = gluon.globals.Session()
from gluon.html import A #@UnusedImport
from gluon.html import B #@UnusedImport
from gluon.html import BEAUTIFY #@UnusedImport
from gluon.html import BODY #@UnusedImport
from gluon.html import BR #@UnusedImport
from gluon.html import CENTER #@UnusedImport
from gluon.html import CODE #@UnusedImport
from gluon.html import DIV #@UnusedImport
from gluon.html import EM #@UnusedImport
from gluon.html import EMBED #@UnusedImport
from gluon.html import embed64 #@UnusedImport
from gluon.html import FIELDSET #@UnusedImport
from gluon.html import FORM #@UnusedImport
from gluon.html import H1 #@UnusedImport
from gluon.html import H2 #@UnusedImport
from gluon.html import H3 #@UnusedImport
from gluon.html import H4 #@UnusedImport
from gluon.html import H5 #@UnusedImport
from gluon.html import H6 #@UnusedImport
from gluon.html import HEAD #@UnusedImport
from gluon.html import HR #@UnusedImport
from gluon.html import HTML #@UnusedImport
from gluon.html import I #@UnusedImport
from gluon.html import IFRAME #@UnusedImport
from gluon.html import IMG #@UnusedImport
from gluon.html import INPUT #@UnusedImport
from gluon.html import LABEL #@UnusedImport
from gluon.html import LEGEND #@UnusedImport
from gluon.html import LI #@UnusedImport
from gluon.html import LINK #@UnusedImport
from gluon.html import MARKMIN #@UnusedImport
from gluon.html import MENU #@UnusedImport
from gluon.html import META #@UnusedImport
from gluon.html import OBJECT #@UnusedImport
from gluon.html import OL #@UnusedImport
from gluon.html import ON #@UnusedImport
from gluon.html import OPTGROUP #@UnusedImport
from gluon.html import OPTION #@UnusedImport
from gluon.html import P #@UnusedImport
from gluon.html import PRE #@UnusedImport
from gluon.html import STYLE #@UnusedImport
from gluon.html import SCRIPT #@UnusedImport
from gluon.html import SELECT #@UnusedImport
from gluon.html import SPAN #@UnusedImport
from gluon.html import TABLE #@UnusedImport
from gluon.html import TAG #@UnusedImport
from gluon.html import TBODY #@UnusedImport
from gluon.html import TD #@UnusedImport
from gluon.html import TEXTAREA #@UnusedImport
from gluon.html import TFOOT #@UnusedImport
from gluon.html import TH #@UnusedImport
from gluon.html import THEAD #@UnusedImport
from gluon.html import TITLE #@UnusedImport
from gluon.html import TR #@UnusedImport
from gluon.html import TT #@UnusedImport
from gluon.html import UL #@UnusedImport
from gluon.html import URL #@UnusedImport
from gluon.html import XHTML #@UnusedImport
from gluon.html import XML #@UnusedImport
from gluon.html import xmlescape #@UnusedImport
from gluon.http import HTTP #@UnusedImport
from gluon.http import redirect #@UnusedImport
import gluon.languages.translator as T #@UnusedImport
from gluon.sql import DAL
global db; db = DAL()
from gluon.sql import SQLDB #@UnusedImport
from gluon.sql import SQLField #@UnusedImport
from gluon.sqlhtml import SQLFORM #@UnusedImport
from gluon.sqlhtml import SQLTABLE #@UnusedImport
from gluon.tools import Auth
global auth; auth = Auth()
from gluon.tools import Crud
global crud; crud = Crud()
from gluon.tools import fetch #@UnusedImport
from gluon.tools import geocode #@UnusedImport
from gluon.tools import Mail
global mail; mail = Mail()
from gluon.tools import PluginManager
global plugins; plugins = PluginManager()
from gluon.tools import prettydate #@UnusedImport
from gluon.tools import Recaptcha #@UnusedImport
from gluon.tools import Service
global service; service = Service()
from gluon.validators import CLEANUP #@UnusedImport
from gluon.validators import CRYPT #@UnusedImport
from gluon.validators import IS_ALPHANUMERIC #@UnusedImport
from gluon.validators import IS_DATE #@UnusedImport
from gluon.validators import IS_DATE_IN_RANGE #@UnusedImport
from gluon.validators import IS_DATETIME #@UnusedImport
from gluon.validators import IS_DATETIME_IN_RANGE #@UnusedImport
from gluon.validators import IS_DECIMAL_IN_RANGE #@UnusedImport
from gluon.validators import IS_EMAIL #@UnusedImport
from gluon.validators import IS_EMPTY_OR #@UnusedImport
from gluon.validators import IS_EQUAL_TO #@UnusedImport
from gluon.validators import IS_EXPR #@UnusedImport
from gluon.validators import IS_FLOAT_IN_RANGE #@UnusedImport
from gluon.validators import IS_IMAGE #@UnusedImport
from gluon.validators import IS_IN_DB #@UnusedImport
from gluon.validators import IS_IN_SET #@UnusedImport
from gluon.validators import IS_INT_IN_RANGE #@UnusedImport
from gluon.validators import IS_IPV4 #@UnusedImport
from gluon.validators import IS_LENGTH #@UnusedImport
from gluon.validators import IS_LIST_OF #@UnusedImport
from gluon.validators import IS_LOWER #@UnusedImport
from gluon.validators import IS_MATCH #@UnusedImport
from gluon.validators import IS_NOT_EMPTY #@UnusedImport
from gluon.validators import IS_NOT_IN_DB #@UnusedImport
from gluon.validators import IS_NULL_OR #@UnusedImport
from gluon.validators import IS_SLUG #@UnusedImport
from gluon.validators import IS_STRONG #@UnusedImport
from gluon.validators import IS_TIME #@UnusedImport
from gluon.validators import IS_UPLOAD_FILENAME #@UnusedImport
from gluon.validators import IS_UPPER #@UnusedImport
from gluon.validators import IS_URL #@UnusedImport
| mit |
yuhcaesar/emacsrc | .emacs.d/.python-environments/default/Lib/encodings/rot_13.py | 88 | 2697 | #!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
See http://ucsub.colorado.edu/~kominek/rot13/ for details.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ROT13 codec driven by the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental ROT13 encoder (stateless, so chunking needs no care)."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental ROT13 decoder (stateless, so chunking needs no care)."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Plain combination of the Codec with the stream writer machinery.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Plain combination of the Codec with the stream reader machinery.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo the codecs registry uses for 'rot-13'."""
    return codecs.CodecInfo(
        name='rot-13',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0041: 0x004e,
0x0042: 0x004f,
0x0043: 0x0050,
0x0044: 0x0051,
0x0045: 0x0052,
0x0046: 0x0053,
0x0047: 0x0054,
0x0048: 0x0055,
0x0049: 0x0056,
0x004a: 0x0057,
0x004b: 0x0058,
0x004c: 0x0059,
0x004d: 0x005a,
0x004e: 0x0041,
0x004f: 0x0042,
0x0050: 0x0043,
0x0051: 0x0044,
0x0052: 0x0045,
0x0053: 0x0046,
0x0054: 0x0047,
0x0055: 0x0048,
0x0056: 0x0049,
0x0057: 0x004a,
0x0058: 0x004b,
0x0059: 0x004c,
0x005a: 0x004d,
0x0061: 0x006e,
0x0062: 0x006f,
0x0063: 0x0070,
0x0064: 0x0071,
0x0065: 0x0072,
0x0066: 0x0073,
0x0067: 0x0074,
0x0068: 0x0075,
0x0069: 0x0076,
0x006a: 0x0077,
0x006b: 0x0078,
0x006c: 0x0079,
0x006d: 0x007a,
0x006e: 0x0061,
0x006f: 0x0062,
0x0070: 0x0063,
0x0071: 0x0064,
0x0072: 0x0065,
0x0073: 0x0066,
0x0074: 0x0067,
0x0075: 0x0068,
0x0076: 0x0069,
0x0077: 0x006a,
0x0078: 0x006b,
0x0079: 0x006c,
0x007a: 0x006d,
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
### Filter API
def rot13(infile, outfile):
    """Filter API: copy *infile* to *outfile* ROT13-encoded.

    NOTE(review): ``str.encode('rot-13')`` only works on Python 2 (this
    file is a Python-2-era encodings module); under Python 3 encoding a
    text string with a str-to-str codec raises — confirm target version.
    """
    outfile.write(infile.read().encode('rot-13'))
if __name__ == '__main__':
    import sys
    rot13(sys.stdin, sys.stdout)
| gpl-2.0 |
machristie/airavata | airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/samples/AiravataClient.py | 2 | 10414 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, ConfigParser
import time
sys.path.append('../lib')
sys.path.append('../') # necessary on some machines
from apache.airavata.api import Airavata
from apache.airavata.model.experiment.ttypes import ExperimentModel, UserConfigurationDataModel, ExperimentType
from apache.airavata.model.workspace.ttypes import Project
from apache.airavata.model.scheduling.ttypes import ComputationalResourceSchedulingModel
from apache.airavata.model.security.ttypes import AuthzToken
from apache.airavata.model.status.ttypes import ExperimentState
from thrift import Thrift
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from thrift.transport.THttpClient import THttpClient
# from apache.airavata.api import Airavata
# from apache.airavata.model.experiment.ttypes import *
# from apache.airavata.model.security.ttypes import *
# from apache.airavata.model.status.ttypes import *
# from apache.airavata.model.scheduling.ttypes import *
# from thrift import Thrift
# from thrift.transport import TSocket
# from thrift.transport import TTransport
# from thrift.protocol import TBinaryProtocol
class AiravataClient():
    """Wrapper around airavataClient object.

    Thin convenience layer over the Thrift-generated Airavata.Client: it
    reads connection settings from an INI file, owns the Thrift transport
    (usable as a context manager), and forwards API calls with the stored
    auth token and gateway id.
    """
    def __init__(self, config):
        """Build the Thrift client from an INI config file.

        Args:
            config: path to an INI file with [AiravataServer] host/port and
                [GatewayProperties] cred_token_id/gateway_id entries.
        """
        # Read Airavata Client properties
        airavataConfig = ConfigParser.RawConfigParser()
        airavataConfig.read(config)
        self.host = airavataConfig.get('AiravataServer', 'host')
        self.port = airavataConfig.getint('AiravataServer', 'port')
        self.cred = airavataConfig.get('GatewayProperties', 'cred_token_id')
        self.gatewayId = airavataConfig.get('GatewayProperties', 'gateway_id')
        # Create a socket to the Airavata Server
        socket = TSocket.TSocket(self.host, self.port)
        socket.setTimeout(10000)  # milliseconds
        # Use Buffered Protocol to speed up over raw sockets
        self.transport = TTransport.TBufferedTransport(socket)
        # Airavata currently uses Binary Protocol
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
        # Create a Airavata client to use the protocol encoder
        self.client = Airavata.Client(protocol)
        # Create dummy token
        # NOTE(review): a hard-coded token presumably works only against
        # servers with security disabled — confirm for production use.
        self.token = AuthzToken("default-token")
    # Context manager methods
    def __enter__(self):
        # Open the transport when entering a "with" block.
        self.open()
        return self
    def __exit__(self, type, value, traceback):
        # Always close the transport, even if the block raised.
        self.close()
    def open(self):
        self.transport.open()
    def close(self):
        self.transport.close()
    def printProperties(self):
        # Print the configured server host/port to stdout.
        print 'Host: {}'.format(self.host)
        print 'Port: {}'.format(self.port)
    def printVersion(self):
        # Queries the server, so the transport must already be open.
        print 'Server version: {}'.format(self.client.getAPIVersion(self.token))
    def getAllAppModules(self, gatewayId):
        # List all application modules registered for the given gateway.
        return self.client.getAllAppModules(self.token, gatewayId)
    def getAllComputeResourceNames(self):
        # Map of compute-resource id -> host name known to the server.
        return self.client.getAllComputeResourceNames(self.token)
    def getComputeResource(self, computeResourceId):
        # Full compute-resource description for one resource id.
        return self.client.getComputeResource(self.token, computeResourceId)
    def getAllApplicationInterfaceNames(self):
        # Uses the gateway id loaded from the config file.
        return self.client.getAllApplicationInterfaceNames(self.token, self.gatewayId)
    def getExperiment(self, expId):
        """
        Returns:
            The airavata experiment_model.ExperimentModel corresponding to experiment ID
        """
        experiment = self.client.getExperiment(self.token, expId)
        # print 'Experiment configurationdata->computationalResource: {}'.format(experiment.userConfigurationData.computationalResourceScheduling.queueName)
        return experiment
    def getExperimentsInProject(self, projectId, limit, offset):
        """
        Args:
            projectId: (str) Identifier of the project
            limit: (int) Amount of results to be fetched
            offset: (int) The starting point of the results to be fetched
        Returns:
            list<experiment_model.ExperimentModel>
        """
        return self.client.getExperimentsInProject(self.token, projectId, limit, offset)
    def getProject(self, projectId):
        """
        Returns:
            The airavata workspace_model.Project getProject corresponding to project ID
        """
        return self.client.getProject(self.token, projectId)
    def createProject(self, project):
        # Registers a workspace_model.Project; returns its new project id.
        return self.client.createProject(self.token, self.gatewayId, project)
    def createSampleExperiment(self):
        """Creates a sample Amber experiment
        Returns:
            The experiment ID (str) corresponding to newly created experiment
        """
        # Hard-coded ids below (application, project, host) are specific to a
        # particular test deployment.
        amberId = "Amber_66ca2b6c-ef36-409d-9f2b-67630c760609"
        #amberId = "Amber_74ad818e-7633-476a-b861-952de9b0a529"
        inputs = self.client.getApplicationInputs(self.token,amberId)
        # NOTE(review): 'input' (and 'id' further down) shadow Python builtins.
        for input in inputs:
            # print input.name
            if input.name == "Heat_Restart_File":
                input.value = "file:///home/airavata/production/appInputs/AMBER_FILES/02_Heat.rst"
            elif input.name == "Parameter_Topology_File":
                input.value ="file:///home/airavata/production/appInputs/AMBER_FILES/prmtop"
            elif input.name == "Production_Control_File":
                input.value = "file:///home/airavata/production/appInputs/AMBER_FILES/03_Prod.in"
        outputs = self.client.getApplicationOutputs(self.token, amberId)
        # for output in outputs:
        #     print output.name
        #projectId = "gsoc_2015_be5a201b-9228-4dd9-9961-ba61b17bf527"
        projectId = "test_project_dd38ab8f-74ae-4ae6-a3ab-2c08cd41b77b"
        stampedeHostName = "stampede.tacc.xsede.org"
        # Used only to generate a unique-ish experiment name below.
        numberOfExps = len(self.getExperimentsInProject(projectId, 100, 0))
        experiment = ExperimentModel()
        experiment.gatewayId = 'default'
        experiment.projectId = projectId
        experiment.experimentType = ExperimentType.SINGLE_APPLICATION
        experiment.userName = "admin"
        # So i can keep track of how many experiments I have submitted
        experiment.experimentName = "Sample_experiment_{0}".format(numberOfExps + 1)
        experiment.description = "Test experiment"
        experiment.executionId = amberId
        experiment.experimentInputs = inputs
        experiment.experimentOutputs = outputs
        # Find the resource id whose host name matches the Stampede host.
        computeResources = self.client.getAvailableAppInterfaceComputeResources(self.token, amberId)
        id = None
        for key, value in computeResources.iteritems():
            # print key , " " , value
            if value == stampedeHostName:
                id = key
                break
        # Create a computational resource scheduling model
        crsm = ComputationalResourceSchedulingModel()
        crsm.totalCPUCount = 4
        crsm.nodeCount = 1
        crsm.queueName = "development"
        crsm.wallTimeLimit = 30
        crsm.totalPhysicalMemory = 1
        # NOTE(review): 'id' stays None if the Stampede host was not found;
        # presumably the server rejects that — confirm.
        crsm.resourceHostId = id
        ucdm = UserConfigurationDataModel()
        ucdm.computationalResourceScheduling = crsm
        ucdm.airavataAutoSchedule = False
        ucdm.overrideManualScheduledParams = False
        experiment.userConfigurationData = ucdm
        expId = self.client.createExperiment(self.token, "default", experiment)
        return expId
    def createExperiment(self, experiment):
        """
        Args:
            experiment = (experiment_model.ExperimentModel)
        Returns:
            experiment ID (str) of newly created experiment
        """
        return self.client.createExperiment(self.token, self.gatewayId, experiment)
    def cloneExperiment(self, existingExperimentId, newExperimentName):
        """Clone a specified experiment with a new name. A copy of the experiment configuration is
        made and is persisted with new metadata. The client has to subsequently update this
        configuration if needed and launch the cloned experiment.
        Args:
            existingExperimentId = (str)
            newExperimentName = (str)
        Returns:
            experiment ID (str) of newly created experiment
        """
        return self.client.cloneExperiment(self.token, existingExperimentId, newExperimentName)
    def launchExperiment(self, expId):
        # Launch with the credential token read from the config file.
        self.client.launchExperiment(self.token, expId, self.cred)
    def getExperimentStatus(self, expId):
        """
        Returns:
            status_models.ExperimentStatus
        """
        return self.client.getExperimentStatus(self.token, expId)
    def getJobStatuses(self, expId):
        """
        Returns:
            map<string, status_models.JobStatus>
        """
        return self.client.getJobStatuses(self.token, expId)
    def getJobDetails(self, expId):
        """
        Returns:
            list<job_model.JobModel>
        """
        return self.client.getJobDetails(self.token, expId)
    def getExperimentOutputs(self, expId):
        """
        Returns:
            list<application_io_models.OutputDataObjectType>
        """
        return self.client.getExperimentOutputs(self.token, expId)
    def getIntermediateOutputs(self, expId):
        """
        Returns:
            list<application_io_models.OutputDataObjectType>
        """
        return self.client.getIntermediateOutputs(self.token, expId)
    def validateExperiment(self, expId):
        """ Validate experiment configuration. True generally indicates the experiment is
        ready to be launched
        Returns:
            True or False
        """
        return self.client.validateExperiment(self.token, expId)
| apache-2.0 |
redebian/documentation | django/core/serializers/json.py | 204 | 1920 | """
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False

    def end_serialization(self):
        # Dump the python-serialized objects onto the stream as JSON, using
        # the date/time/decimal-aware encoder defined below.
        simplejson.dump(self.objects, self.stream,
                        cls=DjangoJSONEncoder, **self.options)

    def getvalue(self):
        # Only meaningful for in-memory streams that expose getvalue().
        getvalue = getattr(self.stream, 'getvalue', None)
        if callable(getvalue):
            return getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    # Raw strings are wrapped in an in-memory stream; file-likes pass through.
    source = StringIO(stream_or_string) if isinstance(stream_or_string, basestring) else stream_or_string
    # Parse the JSON once, then let the python deserializer rebuild objects.
    for obj in PythonDeserializer(simplejson.load(source), **options):
        yield obj
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"

    def default(self, o):
        # NOTE: datetime.datetime must be checked before datetime.date,
        # because datetime is a subclass of date.
        if isinstance(o, datetime.datetime):
            return datetime_safe.new_datetime(o).strftime(
                "%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
        if isinstance(o, datetime.date):
            return datetime_safe.new_date(o).strftime(self.DATE_FORMAT)
        if isinstance(o, datetime.time):
            return o.strftime(self.TIME_FORMAT)
        if isinstance(o, decimal.Decimal):
            # Decimals are rendered as strings to avoid float precision loss.
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
# Prefer DjangoJSONEncoder in new code.
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| bsd-3-clause |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/config/config_error.py | 1 | 1975 | # Copyright (C) 2012 Statoil ASA, Norway.
#
# The file 'enkf_fs.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.config import CONFIG_LIB
from ert.cwrap import BaseCClass, CWrapper
class ConfigError(BaseCClass):
    """Read-only sequence of config-error messages backed by a C object."""

    def __init__(self):
        # Instances come from the C layer via the cwrapper registrations
        # below; direct construction from Python is unsupported.
        raise NotImplementedError("Class can not be instantiated directly!")

    def __len__(self):
        """ @rtype: int """
        return ConfigError.cNamespace().count(self)

    def __getitem__(self, index):
        """ @rtype: str """
        # Only plain integer indices are supported (no slices).
        if not isinstance(index, int):
            raise TypeError("Expected an integer")
        if index >= len(self):
            raise IndexError("Index out of range: %d < %d" % (index, len(self)))
        return ConfigError.cNamespace().iget(self, index)

    def free(self):
        # Release the underlying C object.
        ConfigError.cNamespace().free(self)
##################################################################
# Bind the Python wrapper to the C config library: register the ConfigError
# type with the ctypes bridge and resolve the C function prototypes that the
# class methods above call through cNamespace().
cwrapper = CWrapper(CONFIG_LIB)
cwrapper.registerType("config_error", ConfigError)
cwrapper.registerType("config_error_obj", ConfigError.createPythonObject)
cwrapper.registerType("config_error_ref", ConfigError.createCReference)
ConfigError.cNamespace().free = cwrapper.prototype("void config_error_free(config_error)")
ConfigError.cNamespace().count = cwrapper.prototype("int config_error_count(config_error)")
ConfigError.cNamespace().iget = cwrapper.prototype("char* config_error_iget(config_error, int)")
| gpl-3.0 |
Qalthos/ansible | lib/ansible/modules/network/ios/ios_linkagg.py | 57 | 9433 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_linkagg
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage link aggregation groups on Cisco IOS network devices
description:
- This module provides declarative management of link aggregation groups
on Cisco IOS network devices.
notes:
- Tested against IOS 15.2
options:
group:
description:
- Channel-group number for the port-channel
Link aggregation group. Range 1-255.
mode:
description:
- Mode of the link aggregation group.
choices: ['active', 'on', 'passive', 'auto', 'desirable']
members:
description:
- List of members of the link aggregation group.
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent']
purge:
description:
- Purge links not defined in the I(aggregate) parameter.
default: no
type: bool
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create link aggregation group
ios_linkagg:
group: 10
state: present
- name: delete link aggregation group
ios_linkagg:
group: 10
state: absent
- name: set link aggregation group to members
ios_linkagg:
group: 200
mode: active
members:
- GigabitEthernet0/0
- GigabitEthernet0/1
- name: remove link aggregation group from GigabitEthernet0/0
ios_linkagg:
group: 200
mode: active
members:
- GigabitEthernet0/1
- name: Create aggregate of linkagg definitions
ios_linkagg:
aggregate:
- { group: 3, mode: on, members: [GigabitEthernet0/1] }
- { group: 100, mode: passive, members: [GigabitEthernet0/2] }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface port-channel 30
- interface GigabitEthernet0/3
- channel-group 30 mode on
- no interface port-channel 30
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec
def search_obj_in_list(group, lst):
    """Return the first entry in *lst* whose 'group' equals *group*, else None."""
    return next((entry for entry in lst if entry['group'] == group), None)
def map_obj_to_commands(updates, module):
    """Diff desired state against device state and emit IOS CLI commands.

    Args:
        updates: (want, have) tuple of object lists as produced by
            map_params_to_obj / map_config_to_obj.
        module: AnsibleModule; supplies the 'purge' flag and fail_json.
    Returns:
        list of IOS configuration command strings needed to converge.
    """
    commands = list()
    want, have = updates
    purge = module.params['purge']
    for w in want:
        group = w['group']
        mode = w['mode']
        members = w.get('members') or []
        state = w['state']
        del w['state']  # NOTE: mutates the 'want' dicts in place
        obj_in_have = search_obj_in_list(group, have)
        if state == 'absent':
            # Remove the whole port-channel if it exists on the device.
            if obj_in_have:
                commands.append('no interface port-channel {0}'.format(group))
        elif state == 'present':
            # 'cmd' creates the port-channel interface; 'end' exits config mode.
            cmd = ['interface port-channel {0}'.format(group),
                   'end']
            if not obj_in_have:
                # Port-channel absent on device: create it and add all members.
                if not group:
                    module.fail_json(msg='group is a required option')
                commands.extend(cmd)
                if members:
                    for m in members:
                        commands.append('interface {0}'.format(m))
                        commands.append('channel-group {0} mode {1}'.format(group, mode))
            else:
                if members:
                    if 'members' not in obj_in_have.keys():
                        # Port-channel exists but has no members yet: add all.
                        for m in members:
                            commands.extend(cmd)
                            commands.append('interface {0}'.format(m))
                            commands.append('channel-group {0} mode {1}'.format(group, mode))
                    elif set(members) != set(obj_in_have['members']):
                        # Add wanted-but-unconfigured members...
                        missing_members = list(set(members) - set(obj_in_have['members']))
                        for m in missing_members:
                            commands.extend(cmd)
                            commands.append('interface {0}'.format(m))
                            commands.append('channel-group {0} mode {1}'.format(group, mode))
                        # ...and drop configured-but-no-longer-wanted members.
                        superfluous_members = list(set(obj_in_have['members']) - set(members))
                        for m in superfluous_members:
                            commands.extend(cmd)
                            commands.append('interface {0}'.format(m))
                            commands.append('no channel-group {0} mode {1}'.format(group, mode))
    if purge:
        # Delete any port-channels on the device that are not in 'want'.
        for h in have:
            obj_in_want = search_obj_in_list(h['group'], want)
            if not obj_in_want:
                commands.append('no interface port-channel {0}'.format(h['group']))
    return commands
def map_params_to_obj(module):
    """Normalize module parameters into a list of desired link-agg dicts."""
    params = module.params
    aggregate = params.get('aggregate')
    if not aggregate:
        # Single-group form: build one entry from the top-level parameters.
        return [{
            'group': str(params['group']),
            'mode': params['mode'],
            'members': params['members'],
            'state': params['state']
        }]
    objects = []
    for item in aggregate:
        # Fill unset per-item keys from the top-level parameters (mutates the
        # aggregate items in place, matching the original behavior).
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]
        entry = item.copy()
        entry['group'] = str(entry['group'])
        objects.append(entry)
    return objects
def parse_mode(module, config, group, member):
    """Return the channel-group mode configured on *member* for *group*, or None."""
    netcfg = CustomNetworkConfig(indent=1, contents=config)
    body = netcfg.get_section(['interface {0}'.format(member)])
    # Only inspect the channel-group line when the interface section exists.
    if re.findall(r'interface {0}\n'.format(member), body, re.M):
        match = re.search(r'channel-group {0} mode (\S+)'.format(group), body, re.M)
        if match:
            return match.group(1)
    return None
def parse_members(module, config, group):
    """Collect interface names whose config section assigns them to *group*."""
    found = []
    group_re = re.compile(r'channel-group {0} mode'.format(group))
    intf_re = re.compile(r'interface (\S+)')
    # Running-config sections are separated by '!' lines.
    for section in config.strip().split('!'):
        section = section.strip()
        if not section.startswith('interface'):
            continue
        if group_re.search(section):
            name = intf_re.search(section)
            if name:
                found.append(name.group(1))
    return found
def get_channel(module, config, group):
    """Build {'mode': ..., 'members': [...]} for port-channel *group*."""
    interfaces = re.findall(r'^interface (\S+)', config, re.M)
    if not interfaces:
        return {}
    channel = {}
    # NOTE(review): every iteration overwrites both keys, so the values from
    # the last interface processed win (set order is arbitrary) — confirm
    # this last-write-wins behavior is intended.
    for member in set(interfaces):
        channel['mode'] = parse_mode(module, config, group, member)
        channel['members'] = parse_members(module, config, group)
    return channel
def map_config_to_obj(module):
    """Parse the device running-config into a list of port-channel objects."""
    config = get_config(module)
    objs = list()
    for raw_line in config.split('\n'):
        match = re.search(r'interface Port-channel(\S+)', raw_line.strip(), re.M)
        if not match:
            continue
        group = match.group(1)
        obj = {'group': group}
        obj.update(get_channel(module, config, group))
        objs.append(obj)
    return objs
def main():
    """ main entry point for module execution
    """
    # Per-item option spec, shared between the top-level arguments and the
    # items of the 'aggregate' list.
    element_spec = dict(
        group=dict(type='int'),
        mode=dict(choices=['active', 'on', 'passive', 'auto', 'desirable']),
        members=dict(type='list'),
        state=dict(default='present',
                   choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['group'] = dict(required=True)
    required_one_of = [['group', 'aggregate']]
    required_together = [['members', 'mode']]
    mutually_exclusive = [['group', 'aggregate']]
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec,
                       required_together=required_together),
        purge=dict(default=False, type='bool')
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           required_together=required_together,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    # NOTE(review): 'warnings' is never populated, so the block below is
    # currently dead code kept for parity with other ios_* modules.
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Compute desired vs. actual state, then the commands to converge.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands
    if commands:
        # Only push the commands outside of check mode.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
yaroslavvb/tensorflow | tensorflow/python/ops/gradients.py | 138 | 1417 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.gradients_impl import AggregationMethod
from tensorflow.python.ops.gradients_impl import gradients
from tensorflow.python.ops.gradients_impl import hessians
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# Names kept visible in this module's public namespace; everything else
# imported above is stripped by remove_undocumented() below.
_allowed_symbols = [
    # TODO(drpng): find a good place to reference this.
    "AggregationMethod",
    "gradients",  # tf.gradients.gradients.
    "hessians",  # tf.gradients.hessians
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
sujeetv/incubator-hawq | tools/bin/gppylib/operations/dump.py | 9 | 44658 | import os
import tempfile
from datetime import datetime
from gppylib import gplog
from gppylib.db import dbconn
from gppylib.db.dbconn import execSQL, execSQLForSingleton, UnexpectedRowsError
from gppylib.commands.base import Command, REMOTE, ExecutionError
from gppylib.commands.gp import Psql
from gppylib.commands.unix import getUserName, findCmdInPath, curr_platform, SUNOS
from gppylib.gparray import GpArray
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.operations import Operation
from gppylib.operations.unix import CheckDir, CheckFile, ListFiles, ListFilesByPattern, MakeDir, RemoveFile, RemoveTree, RemoveRemoteTree
from gppylib.operations.utils import RemoteOperation, ParallelOperation
logger = gplog.get_default_logger()

# MPP-15307
# DUMP_DATE dictates the db_dumps/ subdirectory to which gpcrondump will dump.
# It is computed just once to ensure different pieces of logic herein operate on the same subdirectory.
DUMP_DATE = datetime.now().strftime("%Y%m%d")

DUMP_DIR = 'db_dumps'
# File-name prefixes used to locate master/segment dump and status files.
GLOBAL_PREFIX = 'gp_global_1_1_'
MASTER_DBDUMP_PREFIX = 'gp_dump_1_1_'
MASTER_STATUS_PREFIX = 'gp_dump_status_1_1_'
SEG_DBDUMP_PREFIX = 'gp_dump_0_'
SEG_STATUS_PREFIX = 'gp_dump_status_0_'
COMPRESSION_FACTOR = 12 # TODO: Where did 12 come from?

# Test hook: when set to an int, DumpDatabase.execute() overrides gp_dump's
# real exit code with this value.
INJECT_GP_DUMP_FAILURE = None
class DumpDatabase(Operation):
    """Run gp_dump against a database: validate the request, assemble the
    gp_dump command line, execute it, and report timing + exit status.

    execute() returns a dict with 'timestamp_start', 'time_start',
    'time_end' and 'exit_status' keys.
    """
    # TODO: very verbose constructor = room for improvement. in the parent constructor, we could use kwargs
    # to automatically take in all arguments and perhaps do some data type validation.
    def __init__(self, dump_database, dump_schema, include_dump_tables, exclude_dump_tables, include_dump_tables_file, exclude_dump_tables_file, backup_dir, report_dir, free_space_percent, compress, clear_catalog_dumps, encoding, output_options, batch_default, master_datadir, master_port):
        self.dump_database = dump_database
        self.dump_schema = dump_schema
        self.include_dump_tables = include_dump_tables
        self.exclude_dump_tables = exclude_dump_tables
        # BUGFIX: these two assignments previously ended with stray trailing
        # commas, silently turning both attributes into 1-tuples.  Consumers
        # then had to unwrap them with [0], and the --table-file /
        # --exclude-table-file format strings in execute() forgot to,
        # emitting "--table-file=('path',)" instead of the path itself.
        self.include_dump_tables_file = include_dump_tables_file
        self.exclude_dump_tables_file = exclude_dump_tables_file
        self.backup_dir = backup_dir
        self.report_dir = report_dir
        self.free_space_percent = free_space_percent
        self.compress = compress
        self.clear_catalog_dumps = clear_catalog_dumps
        self.encoding = encoding
        self.output_options = output_options
        self.batch_default = batch_default
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        # Validation may prune the exclude list (hence the reassignment).
        self.exclude_dump_tables = ValidateDumpDatabase(dump_database = self.dump_database,
                                                        dump_schema = self.dump_schema,
                                                        include_dump_tables = self.include_dump_tables,
                                                        exclude_dump_tables = self.exclude_dump_tables,
                                                        include_dump_tables_file = self.include_dump_tables_file,
                                                        exclude_dump_tables_file = self.exclude_dump_tables_file,
                                                        backup_dir = self.backup_dir,
                                                        report_dir = self.report_dir,
                                                        free_space_percent = self.free_space_percent,
                                                        compress = self.compress,
                                                        batch_default = self.batch_default,
                                                        master_datadir = self.master_datadir,
                                                        master_port = self.master_port).run()
        # Where the dump data files go (--gp-d) ...
        if self.backup_dir is not None:
            dump_path = os.path.join(self.backup_dir, DUMP_DIR, DUMP_DATE)
        else:
            dump_path = os.path.join(DUMP_DIR, DUMP_DATE)
        # ... and where the report file goes (--gp-r).
        if self.report_dir is not None:
            report_path = os.path.join(self.report_dir, DUMP_DIR, DUMP_DATE)
        else:
            report_path = os.path.join(self.master_datadir, DUMP_DIR, DUMP_DATE)
        dump_line = "gp_dump -p %d -U %s --gp-d=%s --gp-r=%s --gp-s=p" % (self.master_port, getUserName(), dump_path, report_path)
        if self.clear_catalog_dumps:
            dump_line += " -c"
        if self.compress:
            logger.info("Adding compression parameter")
            dump_line += " --gp-c"
        if self.encoding is not None:
            logger.info("Adding encoding %s" % self.encoding)
            dump_line += " --encoding=%s" % self.encoding
        """
        AK: Some ridiculous escaping here. I apologize.
        These options get passed-through gp_dump to gp_dump_agent.
        Commented out lines use escaping that would be reasonable, if gp_dump escaped properly.
        """
        if self.dump_schema is not None:
            logger.info("Adding schema name %s" % self.dump_schema)
            dump_line += " -n \"\\\"%s\\\"\"" % self.dump_schema
            #dump_line += " -n \"%s\"" % self.dump_schema
        dump_line += " %s" % self.dump_database
        for dump_table in self.include_dump_tables:
            schema, table = dump_table.split('.')
            dump_line += " --table=\"\\\"%s\\\"\".\"\\\"%s\\\"\"" % (schema, table)
            #dump_line += " --table=\"%s\".\"%s\"" % (schema, table)
        for dump_table in self.exclude_dump_tables:
            schema, table = dump_table.split('.')
            dump_line += " --exclude-table=\"\\\"%s\\\"\".\"\\\"%s\\\"\"" % (schema, table)
            #dump_line += " --exclude-table=\"%s\".\"%s\"" % (schema, table)
        # BUGFIX: format the path itself; previously the 1-tuple wrapper was
        # interpolated, producing a tuple repr on the command line.
        if self.include_dump_tables_file is not None:
            dump_line += " --table-file=%s" % self.include_dump_tables_file
        if self.exclude_dump_tables_file is not None:
            dump_line += " --exclude-table-file=%s" % self.exclude_dump_tables_file
        for opt in self.output_options:
            dump_line += " %s" % opt
        logger.info("Dump command line %s" % dump_line)
        logger.info("Starting dump process")
        start = datetime.now()
        cmd = Command('Invoking gp_dump', dump_line)
        cmd.run()
        rc = cmd.get_results().rc
        if INJECT_GP_DUMP_FAILURE is not None:
            # Test hook: force a specific exit code.
            rc = INJECT_GP_DUMP_FAILURE
        if rc != 0:
            logger.warn("Dump process returned exit code %d" % rc)
        else:
            logger.info("Dump process returned exit code 0")
        end = datetime.now()
        return {'timestamp_start': start.strftime("%Y%m%d%H%M%S"),
                'time_start': start.strftime("%H:%M:%S"),
                'time_end': end.strftime("%H:%M:%S"),
                'exit_status': rc}
class PostDumpDatabase(Operation):
    """Post-dump verification: locate the new report file's timestamp, then
    check status and dump files on the master and on every primary segment.

    execute() returns {'exit_status': 0|1|2, 'timestamp': str|'n/a'} where
    2 = no usable report file, 1 = some check failed, 0 = all checks passed.
    """
    def __init__(self, timestamp_start, compress, backup_dir, report_dir, batch_default, master_datadir, master_port):
        self.timestamp_start = timestamp_start
        self.compress = compress
        self.backup_dir = backup_dir
        self.report_dir = report_dir
        self.batch_default = batch_default
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        # First, get timestamp from .rpt file
        path = self.report_dir if self.report_dir is not None else self.master_datadir
        path = os.path.join(path, DUMP_DIR, DUMP_DATE)
        reports = ListFilesByPattern(path, "gp_dump_*.rpt").run()
        if not reports:
            logger.error("Could not locate a report file on master.")
            return {'exit_status': 2, 'timestamp': 'n/a'}
        # Lexicographic sort works because the name embeds YYYYMMDDHHMMSS;
        # reversing picks the newest report.
        reports.sort()
        reports.reverse()
        report = reports[0]
        timestamp = report[-18:-4]        # last 14 digits, just before .rpt
        # A report older than our start time means gp_dump never wrote one.
        if int(timestamp) < int(self.timestamp_start):
            logger.error("Could not locate the newly generated report file on master.")
            return {'exit_status': 2, 'timestamp': 'n/a'}
        logger.info("Timestamp key = %s" % timestamp)
        # Check master dumps
        path = self.backup_dir if self.backup_dir is not None else self.master_datadir
        path = os.path.join(path, DUMP_DIR, DUMP_DATE)
        status_file = os.path.join(path, "%s%s" % (MASTER_STATUS_PREFIX, timestamp))
        dump_file = os.path.join(path, "%s%s" % (MASTER_DBDUMP_PREFIX, timestamp))
        if self.compress: dump_file += ".gz"
        try:
            PostDumpSegment(status_file = status_file,
                            dump_file = dump_file).run()
        except NoStatusFile, e:
            logger.warn('Status file %s not found on master' % status_file)
            return {'exit_status': 1, 'timestamp': timestamp}
        except StatusFileError, e:
            logger.warn('Status file %s on master indicates errors' % status_file)
            return {'exit_status': 1, 'timestamp': timestamp}
        except NoDumpFile, e:
            logger.warn('Dump file %s not found on master' % dump_file)
            return {'exit_status': 1, 'timestamp': timestamp}
        else:
            logger.info('Checked master status file and master dump file.')
        # Perform similar checks for primary segments
        operations = []
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
        for seg in segs:
            path = self.backup_dir if self.backup_dir is not None else seg.getSegmentDataDirectory()
            path = os.path.join(path, DUMP_DIR, DUMP_DATE)
            status_file = os.path.join(path, "%s%d_%s" % (SEG_STATUS_PREFIX, seg.getSegmentDbId(), timestamp))
            dump_file = os.path.join(path, "%s%d_%s" % (SEG_DBDUMP_PREFIX, seg.getSegmentDbId(), timestamp))
            if self.compress: dump_file += ".gz"
            operations.append(RemoteOperation(PostDumpSegment(status_file = status_file,
                                                              dump_file = dump_file),
                                              seg.getSegmentHostName()))
        # Run the per-segment checks in parallel, then tally results.
        ParallelOperation(operations, self.batch_default).run()
        success = 0
        for remote in operations:
            host = remote.host
            status_file = remote.operation.status_file
            dump_file = remote.operation.dump_file
            try:
                # get_ret() re-raises any exception from the remote check.
                remote.get_ret()
            except NoStatusFile, e:
                logger.warn('Status file %s not found on %s' % (status_file, host))
            except StatusFileError, e:
                logger.warn('Status file %s on %s indicates errors' % (status_file, host))
            except NoDumpFile, e:
                logger.warn('Dump file %s not found on %s' % (dump_file, host))
            else:
                success += 1
        if success < len(operations):
            logger.warn("Dump was unsuccessful. %d segment(s) failed post-dump checks." % (len(operations) - success))
            return {'exit_status': 1, 'timestamp': timestamp}
        return {'exit_status': 0, 'timestamp': timestamp}
class PostDumpSegment(Operation):
    """Sanity checks for one segment after a dump: the status file must exist
    and report success, and the dump data file must exist.

    Raises:
        NoStatusFile: the status file is missing.
        StatusFileError: the status file exists but does not report success.
        NoDumpFile: the dump data file is missing.
    """
    def __init__(self, status_file, dump_file):
        self.status_file = status_file
        self.dump_file = dump_file
    def execute(self):
        # Ensure that status file exists
        if not CheckFile(self.status_file).run():
            logger.error('Could not locate status file: %s' % self.status_file)
            raise NoStatusFile()
        # Ensure that status file indicates successful dump
        with open(self.status_file, 'r') as f:
            for line in f:
                if line.find("Finished successfully") != -1:
                    break
            else:
                logger.error("Status report file indicates errors: %s" % self.status_file)
                # BUGFIX: the scan above left the file positioned at EOF, so
                # the replay loop below used to log nothing; rewind first so
                # the status file contents actually reach the log.
                f.seek(0)
                for line in f:
                    logger.info(line)
                logger.error("Status file contents dumped to log file")
                raise StatusFileError()
        # Ensure that dump file exists
        if not CheckFile(self.dump_file).run():
            logger.error("Could not locate dump file: %s" % self.dump_file)
            raise NoDumpFile()
class NoStatusFile(Exception):
    """Raised when a segment's dump status file cannot be found."""


class StatusFileError(Exception):
    """Raised when a status file exists but reports dump errors."""


class NoDumpFile(Exception):
    """Raised when a segment's dump data file cannot be found."""
class ValidateDumpDatabase(Operation):
    """Pre-dump validation pipeline: database/schema existence, cluster
    health, dump directories, dump targets, and (optionally) disk space.

    execute() returns the possibly-pruned exclude-table list produced by
    ValidateDumpTargets.
    """
    def __init__(self, dump_database, dump_schema, include_dump_tables, exclude_dump_tables,
                 include_dump_tables_file, exclude_dump_tables_file, backup_dir, report_dir,
                 free_space_percent, compress, batch_default, master_datadir, master_port):
        self.dump_database = dump_database
        self.dump_schema = dump_schema
        self.include_dump_tables = include_dump_tables
        self.exclude_dump_tables = exclude_dump_tables
        self.include_dump_tables_file = include_dump_tables_file
        self.exclude_dump_tables_file = exclude_dump_tables_file
        self.backup_dir = backup_dir
        self.report_dir = report_dir
        self.free_space_percent = free_space_percent
        self.compress = compress
        self.batch_default = batch_default
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        # Each Validate* operation raises on failure, aborting the dump early.
        ValidateDatabaseExists(database = self.dump_database,
                               master_port = self.master_port).run()
        if self.dump_schema is not None:
            ValidateSchemaExists(database = self.dump_database,
                                 schema = self.dump_schema,
                                 master_port = self.master_port).run()
        ValidateCluster(master_port = self.master_port).run()
        ValidateAllDumpDirs(backup_dir = self.backup_dir,
                            report_dir = self.report_dir,
                            batch_default = self.batch_default,
                            master_datadir = self.master_datadir,
                            master_port = self.master_port).run()
        # Target validation may prune nonexistent tables from the exclude list.
        self.exclude_dump_tables = ValidateDumpTargets(dump_database = self.dump_database,
                                                       dump_schema = self.dump_schema,
                                                       include_dump_tables = self.include_dump_tables,
                                                       exclude_dump_tables = self.exclude_dump_tables,
                                                       include_dump_tables_file = self.include_dump_tables_file,
                                                       exclude_dump_tables_file = self.exclude_dump_tables_file,
                                                       master_port = self.master_port).run()
        # Disk-space validation is optional (skipped when no threshold given).
        if self.free_space_percent is not None:
            ValidateDiskSpace(free_space_percent = self.free_space_percent,
                              compress = self.compress,
                              dump_database = self.dump_database,
                              include_dump_tables = self.include_dump_tables,
                              batch_default = self.batch_default,
                              master_port = self.master_port).run()
        return self.exclude_dump_tables
class ValidateDiskSpace(Operation):
    """Fan out ValidateSegDiskSpace to every primary segment in parallel.

    Raises ExceptionNoStackTraceNeeded if any segment fails its check.
    """
    # TODO: this doesn't take into account that multiple segments may be dumping to the same logical disk.
    def __init__(self, free_space_percent, compress, dump_database, include_dump_tables, batch_default, master_port):
        self.free_space_percent = free_space_percent
        self.compress = compress
        self.dump_database = dump_database
        self.include_dump_tables = include_dump_tables
        self.batch_default = batch_default                  # parallelism for the remote checks
        self.master_port = master_port
    def execute(self):
        # gp_toolkit must be present for the size queries issued on segments.
        ValidateGpToolkit(database = self.dump_database,
                          master_port = self.master_port).run()
        operations = []
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
        for seg in segs:
            operations.append(RemoteOperation(ValidateSegDiskSpace(free_space_percent = self.free_space_percent,
                                                                   compress = self.compress,
                                                                   dump_database = self.dump_database,
                                                                   include_dump_tables = self.include_dump_tables,
                                                                   datadir = seg.getSegmentDataDirectory(),
                                                                   segport = seg.getSegmentPort()),
                                              seg.getSegmentHostName()))
        ParallelOperation(operations, self.batch_default).run()
        # Tally successes; get_ret() re-raises the remote exception, if any.
        success = 0
        for remote in operations:
            host = remote.host
            try:
                remote.get_ret()
            except NotEnoughDiskSpace, e:
                logger.error("%s has insufficient disk space. [Need: %dK, Free %dK]" % (host, e.needed_space, e.free_space))
            else:
                success += 1
        if success < len(operations):
            raise ExceptionNoStackTraceNeeded("Cannot continue. %d segment(s) failed disk space checks" % (len(operations) - success))
class ValidateSegDiskSpace(Operation):
    """Estimate this segment's dump size and verify free disk space.

    Raises NotEnoughDiskSpace when the projected post-dump free space falls
    below free_space_percent of current free space.
    """
    # TODO: this estimation of needed space needs work. it doesn't include schemas or exclusion tables.
    def __init__(self, free_space_percent, compress, dump_database, include_dump_tables, datadir, segport):
        self.free_space_percent = free_space_percent    # integer percentage, e.g. 10
        self.compress = compress
        self.dump_database = dump_database
        self.include_dump_tables = include_dump_tables
        self.datadir = datadir
        self.segport = segport
    def execute(self):
        needed_space = 0
        dburl = dbconn.DbURL(dbname=self.dump_database, port=self.segport)
        conn = None
        try:
            conn = dbconn.connect(dburl, utility=True)
            if self.include_dump_tables:
                # Table-level dump: sum the sizes (KB) of the requested tables.
                for dump_table in self.include_dump_tables:
                    needed_space += execSQLForSingleton(conn, "SELECT pg_relation_size('%s')/1024;" % dump_table)
            else:
                # Full-database dump.
                needed_space = execSQLForSingleton(conn, "SELECT pg_database_size('%s')/1024;" % self.dump_database)
        finally:
            if conn is not None:
                conn.close()
        if self.compress:
            needed_space = needed_space / COMPRESSION_FACTOR
        # Free space (KB) on the filesystem that holds the segment datadir.
        stat_res = os.statvfs(self.datadir)
        free_space = (stat_res.f_bavail * stat_res.f_frsize) / 1024
        # Bug fix: force float division. With integer operands (Python 2),
        # both ratios truncated to 0 and the threshold check never fired
        # unless needed_space actually exceeded free_space.
        if free_space == 0 or float(free_space - needed_space) / free_space < float(self.free_space_percent) / 100:
            logger.error("Disk space: [Need: %dK, Free %dK]" % (needed_space, free_space))
            raise NotEnoughDiskSpace(free_space, needed_space)
        logger.info("Disk space: [Need: %dK, Free %dK]" % (needed_space, free_space))
class NotEnoughDiskSpace(Exception):
    """Raised when a segment lacks sufficient free disk space for a dump.

    Carries the observed free space and estimated needed space (both in KB)
    both as attributes and as the exception args.
    """
    def __init__(self, free_space, needed_space):
        Exception.__init__(self, free_space, needed_space)
        self.free_space = free_space
        self.needed_space = needed_space
class ValidateGpToolkit(Operation):
    """Ensure the gp_toolkit schema exists in the database, installing it if absent."""
    def __init__(self, database, master_port):
        self.database = database
        self.master_port = master_port
    def execute(self):
        dburl = dbconn.DbURL(dbname=self.database, port=self.master_port)
        conn = None
        try:
            conn = dbconn.connect(dburl)
            # Count objects whose namespace is gp_toolkit; > 0 means installed.
            count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_namespace.nspname = 'gp_toolkit' and pg_class.relnamespace = pg_namespace.oid")
        finally:
            if conn is not None:
                conn.close()
        if count > 0:
            logger.debug("gp_toolkit exists within database %s." % self.database)
            return
        logger.info("gp_toolkit not found. Installing...")
        # $GPHOME is expanded by Psql/shell at execution time.
        Psql('Installing gp_toolkit',
             filename='$GPHOME/share/postgresql/gp_toolkit.sql',
             database=self.database,
             port=self.master_port).run(validateAfter=True)
class ValidateAllDumpDirs(Operation):
    """Validate (and create if needed) dump directories on master and all primaries.

    Master failures abort immediately; segment checks run in parallel and
    the Operation aborts if any segment fails.
    """
    def __init__(self, backup_dir, report_dir, batch_default, master_datadir, master_port):
        self.backup_dir = backup_dir        # optional override for dump location
        self.report_dir = report_dir        # optional override for report location
        self.batch_default = batch_default
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        # When an override dir is absent, the master datadir is used instead.
        if self.backup_dir is not None and self.report_dir is not None:
            master_dirs_to_check = [self.backup_dir, self.report_dir]
        elif self.backup_dir is not None:
            master_dirs_to_check = [self.backup_dir, self.master_datadir]
        elif self.report_dir is not None:
            master_dirs_to_check = [self.report_dir, self.master_datadir]
        else:
            master_dirs_to_check = [self.master_datadir]
        for dir in master_dirs_to_check:
            try:
                ValidateDumpDirs(dir).run()
            except DumpDirCreateFailed, e:
                raise ExceptionNoStackTraceNeeded('Could not create %s on master. Cannot continue.' % dir)
            except DumpDirNotWritable, e:
                raise ExceptionNoStackTraceNeeded('Could not write to %s on master. Cannot continue.' % dir)
            else:
                logger.info('Checked %s on master' % dir)
        # Check backup target on segments (either master_datadir or backup_dir, if present)
        operations = []
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
        for seg in segs:
            dir = self.backup_dir if self.backup_dir is not None else seg.getSegmentDataDirectory()
            operations.append(RemoteOperation(ValidateDumpDirs(dir), seg.getSegmentHostName()))
        ParallelOperation(operations, self.batch_default).run()
        # Tally successes; get_ret() re-raises the remote exception, if any.
        success = 0
        for remote in operations:
            dir = remote.operation.dir
            host = remote.host
            try:
                remote.get_ret()
            except DumpDirCreateFailed, e:
                logger.error("Could not create %s on %s." % (dir, host))
            except DumpDirNotWritable, e:
                logger.error("Could not write to %s on %s." % (dir, host))
            else:
                success += 1
        if success < len(operations):
            raise ExceptionNoStackTraceNeeded("Cannot continue. %d segment(s) failed directory checks" % (len(operations) - success))
class ValidateDumpDirs(Operation):
    """Ensure <dir>/DUMP_DIR/DUMP_DATE exists (creating it if needed) and is writable.

    Raises DumpDirCreateFailed or DumpDirNotWritable on failure.
    """
    def __init__(self, dir):
        self.dir = dir
    def execute(self):
        path = os.path.join(self.dir, DUMP_DIR, DUMP_DATE)
        exists = CheckDir(path).run()
        if exists:
            logger.info("Directory %s exists" % path)
        else:
            logger.info("Directory %s not found, will try to create" % path)
            try:
                MakeDir(path).run()
            except OSError, e:
                logger.exception("Could not create directory %s" % path)
                raise DumpDirCreateFailed()
            else:
                logger.info("Created %s" % path)
        # Writability probe: creating (and auto-deleting) a temp file in the dir.
        try:
            with tempfile.TemporaryFile(dir=path) as f:
                pass
        except Exception, e:
            logger.exception("Cannot write to %s" % path)
            raise DumpDirNotWritable()
class DumpDirCreateFailed(Exception): pass  # dump directory could not be created
class DumpDirNotWritable(Exception): pass  # dump directory exists but is not writable
class ValidateDumpTargets(Operation):
    """Validate include/exclude table targets; the two are mutually exclusive.

    Returns the exclude-table list, possibly rebuilt by ValidateExcludeTargets.
    """
    def __init__(self, dump_database, dump_schema, include_dump_tables, exclude_dump_tables,
                 include_dump_tables_file, exclude_dump_tables_file, master_port):
        self.dump_database = dump_database
        self.dump_schema = dump_schema
        self.include_dump_tables = include_dump_tables
        self.exclude_dump_tables = exclude_dump_tables
        self.include_dump_tables_file = include_dump_tables_file
        self.exclude_dump_tables_file = exclude_dump_tables_file
        self.master_port = master_port
    def execute(self):
        # Include and exclude specifications cannot be combined.
        if ((len(self.include_dump_tables) > 0 or (self.include_dump_tables_file is not None)) and
            (len(self.exclude_dump_tables) > 0 or (self.exclude_dump_tables_file is not None))):
            raise ExceptionNoStackTraceNeeded("Cannot use -t/--table-file and -T/--exclude-table-file options at same time")
        elif len(self.include_dump_tables) > 0 or self.include_dump_tables_file is not None:
            logger.info("Configuring for single-database, include-table dump")
            ValidateIncludeTargets(dump_database = self.dump_database,
                                   dump_schema = self.dump_schema,
                                   include_dump_tables = self.include_dump_tables,
                                   include_dump_tables_file = self.include_dump_tables_file,
                                   master_port = self.master_port).run()
        elif len(self.exclude_dump_tables) > 0 or self.exclude_dump_tables_file is not None:
            logger.info("Configuring for single-database, exclude-table dump")
            self.exclude_dump_tables = ValidateExcludeTargets(dump_database = self.dump_database,
                                                              dump_schema = self.dump_schema,
                                                              exclude_dump_tables = self.exclude_dump_tables,
                                                              exclude_dump_tables_file = self.exclude_dump_tables_file,
                                                              master_port = self.master_port).run()
        else:
            logger.info("Configuring for single database dump")
        return self.exclude_dump_tables
class ValidateIncludeTargets(Operation):
    """Validate the include-table list for a dump.

    Merges tables given on the command line with those from the optional
    include file, then verifies each entry is schema-qualified, exists in
    the dump database, and (when a schema dump was requested) belongs to
    that schema.
    """
    def __init__(self, dump_database, dump_schema, include_dump_tables, include_dump_tables_file, master_port):
        self.dump_database = dump_database
        self.dump_schema = dump_schema
        self.include_dump_tables = include_dump_tables
        self.include_dump_tables_file = include_dump_tables_file
        self.master_port = master_port
    def execute(self):
        dump_tables = list(self.include_dump_tables)
        if self.include_dump_tables_file is not None:
            # Bug fix: the original referenced the bare name
            # include_dump_tables_file (a NameError) and tested the file
            # object's truthiness, which can never fail -- open() raises
            # IOError instead. The `with` block also guarantees closure.
            with open(self.include_dump_tables_file, 'rU') as include_file:
                for line in include_file:
                    dump_tables.append(line.strip('\n'))
        for dump_table in dump_tables:
            if '.' not in dump_table:
                raise ExceptionNoStackTraceNeeded("No schema name supplied for table %s" % dump_table)
            schema, table = dump_table.split('.')
            exists = CheckTableExists(schema = schema,
                                      table = table,
                                      database = self.dump_database,
                                      master_port = self.master_port).run()
            if exists:
                logger.info("Located table %s in %s database" % (dump_table, self.dump_database))
            else:
                raise ExceptionNoStackTraceNeeded("Table %s does not exist in %s database" % (dump_table, self.dump_database))
            # Bug fix: this check sat outside the loop, so only the schema of
            # the *last* table was ever compared against the requested schema.
            if self.dump_schema is not None:
                if self.dump_schema != schema:
                    raise ExceptionNoStackTraceNeeded("Schema name %s not same as schema on %s" % (self.dump_schema, dump_table))
class ValidateExcludeTargets(Operation):
    """Validate the exclude-table list for a dump.

    Returns a rebuilt exclude list containing only tables that actually
    exist in the dump database (and are outside the requested schema, when
    one was given); other entries are dropped with a warning.
    """
    def __init__(self, dump_database, dump_schema, exclude_dump_tables, exclude_dump_tables_file, master_port):
        self.dump_database = dump_database
        self.dump_schema = dump_schema
        self.exclude_dump_tables = exclude_dump_tables
        self.exclude_dump_tables_file = exclude_dump_tables_file
        self.master_port = master_port
    def execute(self):
        rebuild_excludes = []
        dump_tables = list(self.exclude_dump_tables)
        if self.exclude_dump_tables_file is not None:
            # Bug fix: the original referenced the bare name
            # exclude_dump_tables_file (a NameError) and tested the file
            # object's truthiness, which can never fail -- open() raises
            # IOError instead. The `with` block also guarantees closure.
            with open(self.exclude_dump_tables_file, 'rU') as exclude_file:
                for line in exclude_file:
                    dump_tables.append(line.strip('\n'))
        for dump_table in dump_tables:
            if '.' not in dump_table:
                raise ExceptionNoStackTraceNeeded("No schema name supplied for exclude table %s" % dump_table)
            schema, table = dump_table.split('.')
            exists = CheckTableExists(schema = schema,
                                      table = table,
                                      database = self.dump_database,
                                      master_port = self.master_port).run()
            if exists:
                if self.dump_schema != schema:
                    logger.info("Adding table %s to exclude list" % dump_table)
                    rebuild_excludes.append(dump_table)
                else:
                    logger.warn("Schema dump request and exclude table %s not in that schema, ignoring" % dump_table)
            else:
                logger.warn("Exclude table %s does not exist in %s database, ignoring" % (dump_table, self.dump_database))
        if len(rebuild_excludes) == 0:
            logger.warn("All exclude table names have been removed due to issues, see log file")
        # Bug fix: the original returned the *unfiltered* input list, silently
        # discarding the rebuilt list computed above (and contradicting the
        # "have been removed" warning). Return the filtered list as intended.
        return rebuild_excludes
class ValidateDatabaseExists(Operation):
    """Raise ExceptionNoStackTraceNeeded unless the target database exists."""
    # TODO: move this to gppylib.operations.common?
    def __init__(self, database, master_port):
        self.database = database
        self.master_port = master_port
    def execute(self):
        conn = None
        try:
            conn = dbconn.connect(dbconn.DbURL(port = self.master_port ))
            row_count = execSQLForSingleton(conn, "select count(*) from pg_database where datname='%s';" % self.database)
            if not row_count:
                raise ExceptionNoStackTraceNeeded("Database %s does not exist." % self.database)
        finally:
            if conn is not None:
                conn.close()
class ValidateSchemaExists(Operation):
    """Raise ExceptionNoStackTraceNeeded unless the schema exists in the database."""
    # TODO: move this to gppylib.operations.common?
    def __init__(self, database, schema, master_port):
        self.database = database
        self.schema = schema
        self.master_port = master_port
    def execute(self):
        conn = None
        try:
            url = dbconn.DbURL(port = self.master_port, dbname = self.database)
            conn = dbconn.connect(url)
            matches = execSQLForSingleton(conn, "select count(*) from pg_namespace where nspname='%s';" % self.schema)
            if not matches:
                raise ExceptionNoStackTraceNeeded("Schema %s does not exist in database %s." % (self.schema, self.database))
        finally:
            if conn is not None:
                conn.close()
class CheckTableExists(Operation):
    """Return True when schema.table exists in the given database, else False."""
    # TODO: move this to gppylib.operations.common?
    def __init__(self, database, schema, table, master_port):
        self.database = database
        self.schema = schema
        self.table = table
        self.master_port = master_port
    def execute(self):
        # Bug fix: conn must be pre-initialized. If DbURL/connect raised, the
        # finally clause hit a NameError on `conn`, masking the real error.
        conn = None
        try:
            dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
            conn = dbconn.connect(dburl)
            count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'" % (self.table, self.schema))
            return count > 0
        finally:
            if conn is not None:
                conn.close()
class ValidateCluster(Operation):
    """Abort the dump if any primary segment instance is down."""
    def __init__(self, master_port):
        self.master_port = master_port
    def execute(self):
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        failed_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True) and seg.isSegmentDown()]
        if len(failed_segs) != 0:
            logger.warn("Failed primary segment instances detected")
            # NOTE(review): the accessor is spelled getSegmentDbId elsewhere in
            # this file (DumpConfig) -- confirm the correct casing of this API.
            failed_dbids = [seg.getSegmentDbid() for seg in failed_segs]
            # Bug fix: dbids are integers; str.join requires strings, so the
            # original raised TypeError while formatting this message.
            raise ExceptionNoStackTraceNeeded("Detected failed segment(s) with dbid=%s" % ",".join(map(str, failed_dbids)))
class UpdateHistoryTable(Operation):
    """Record the outcome of a dump run in public.gpcrondump_history.

    Creates the history table on first use; failures here are logged but
    never abort the dump.
    """
    HISTORY_TABLE = "public.gpcrondump_history"
    def __init__(self, dump_database, time_start, time_end, options_list, timestamp, dump_exit_status, pseudo_exit_status, master_port):
        self.dump_database = dump_database
        self.time_start = time_start
        self.time_end = time_end
        self.options_list = options_list
        self.timestamp = timestamp              # dump key (YYYYMMDDHHMMSS)
        self.dump_exit_status = dump_exit_status
        self.pseudo_exit_status = pseudo_exit_status    # 0/1/2 -> COMPLETED/WARNING/FATAL
        self.master_port = master_port
    def execute(self):
        schema, table = UpdateHistoryTable.HISTORY_TABLE.split('.')
        exists = CheckTableExists(database = self.dump_database,
                                  schema = schema,
                                  table = table,
                                  master_port = self.master_port).run()
        if not exists:
            conn = None
            CREATE_HISTORY_TABLE = """ create table %s (rec_date timestamp, start_time char(8), end_time char(8), options text, dump_key varchar(20), dump_exit_status smallint, script_exit_status smallint, exit_text varchar(10)) distributed by (rec_date); """ % UpdateHistoryTable.HISTORY_TABLE
            try:
                dburl = dbconn.DbURL(port=self.master_port, dbname=self.dump_database)
                conn = dbconn.connect(dburl)
                execSQL(conn, CREATE_HISTORY_TABLE)
                conn.commit()
            except Exception, e:
                # Best-effort: history recording must not fail the dump.
                logger.exception("Unable to create %s in %s database" % (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
                return
            else:
                logger.info("Created %s in %s database" % (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
            finally:
                if conn is not None:
                    conn.close()
        translate_rc_to_msg = { 0: "COMPLETED", 1: "WARNING", 2: "FATAL" }
        exit_msg = translate_rc_to_msg[self.pseudo_exit_status]
        # NOTE(review): SQL built by %-interpolation; inputs originate from
        # gpcrondump itself, but quoting in options_list could still break the
        # statement -- parameterized SQL would be safer.
        APPEND_HISTORY_TABLE = """ insert into %s values (now(), '%s', '%s', '%s', '%s', %d, %d, '%s'); """ % (UpdateHistoryTable.HISTORY_TABLE, self.time_start, self.time_end, self.options_list, self.timestamp, self.dump_exit_status, self.pseudo_exit_status, exit_msg)
        conn = None
        try:
            dburl = dbconn.DbURL(port=self.master_port, dbname=self.dump_database)
            conn = dbconn.connect(dburl)
            execSQL(conn, APPEND_HISTORY_TABLE)
            conn.commit()
        except Exception, e:
            logger.exception("Failed to insert record into %s in %s database" % (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
        else:
            logger.info("Inserted dump record into %s in %s database" % (UpdateHistoryTable.HISTORY_TABLE, self.dump_database))
        finally:
            if conn is not None:
                conn.close()
class DumpGlobal(Operation):
    """Dump global objects via `pg_dumpall -g --gp-syntax` into the dump directory."""
    def __init__(self, timestamp, master_datadir, backup_dir):
        self.timestamp = timestamp
        self.master_datadir = master_datadir
        self.backup_dir = backup_dir
    def execute(self):
        logger.info("Commencing pg_catalog dump")
        # backup_dir, when given, overrides the master datadir as the target root.
        base_dir = self.backup_dir if self.backup_dir is not None else self.master_datadir
        global_file = os.path.join(base_dir, DUMP_DIR, DUMP_DATE, "%s%s" % (GLOBAL_PREFIX, self.timestamp))
        Command('Dump global objects',
                "pg_dumpall -g --gp-syntax > %s" % global_file).run(validateAfter=True)
class DumpConfig(Operation):
    """Tar up *.conf files from the master and every primary segment datadir."""
    # TODO: Should we really just give up if one of the tars fails?
    # TODO: WorkerPool
    def __init__(self, backup_dir, master_datadir, master_port):
        self.backup_dir = backup_dir        # optional override for the tar destination
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        config_backup_file = "gp_master_config_files_%s.tar" % timestamp
        if self.backup_dir is not None:
            path = os.path.join(self.backup_dir, DUMP_DIR, DUMP_DATE, config_backup_file)
        else:
            path = os.path.join(self.master_datadir, DUMP_DIR, DUMP_DATE, config_backup_file)
        logger.info("Dumping master config files")
        Command("Dumping master configuration files",
                "tar cf %s %s/*.conf" % (path, self.master_datadir)).run(validateAfter=True)
        logger.info("Dumping segment config files")
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        primaries = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
        for seg in primaries:
            # One tar per segment, keyed by dbid, executed remotely on its host.
            config_backup_file = "gp_segment_config_files_0_%d_%s.tar" % (seg.getSegmentDbId(), timestamp)
            if self.backup_dir is not None:
                path = os.path.join(self.backup_dir, DUMP_DIR, DUMP_DATE, config_backup_file)
            else:
                path = os.path.join(seg.getSegmentDataDirectory(), DUMP_DIR, DUMP_DATE, config_backup_file)
            host = seg.getSegmentHostName()
            Command("Dumping segment config files",
                    "tar cf %s %s/*.conf" % (path, seg.getSegmentDataDirectory()),
                    ctxt=REMOTE,
                    remoteHost=host).run(validateAfter=True)
class DeleteCurrentDump(Operation):
    """Best-effort removal of a given dump timestamp's files from master and all primaries."""
    def __init__(self, timestamp, master_datadir, master_port):
        self.timestamp = timestamp
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        try:
            DeleteCurrentSegDump(self.timestamp, self.master_datadir).run()
        except OSError, e:
            # Deletion failures are logged, not fatal.
            logger.warn("Error encountered during deletion of %s on master" % self.timestamp)
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
        for seg in segs:
            try:
                RemoteOperation(DeleteCurrentSegDump(self.timestamp, seg.getSegmentDataDirectory()),
                                seg.getSegmentHostName()).run()
            except OSError, e:
                logger.warn("Error encountered during deletion of %s on %s" % (self.timestamp, seg.getSegmentHostName()))
class DeleteCurrentSegDump(Operation):
    """Delete every dump file matching *timestamp* under one datadir's dump directory."""
    # TODO: Improve with grouping by host.
    def __init__(self, timestamp, datadir):
        self.timestamp = timestamp
        self.datadir = datadir
    def execute(self):
        dump_path = os.path.join(self.datadir, DUMP_DIR, DUMP_DATE)
        for name in ListFilesByPattern(dump_path, "*%s*" % self.timestamp).run():
            RemoveFile(os.path.join(dump_path, name)).run()
class DeleteOldestDumps(Operation):
    """Remove the single oldest dated dump set from master and all primaries.

    Returns the removed dump date string, or None when there is nothing to
    remove. Deletion errors are logged, not raised.
    """
    # TODO: This Operation isn't consuming backup_dir. Should it?
    def __init__(self, master_datadir, master_port):
        self.master_datadir = master_datadir
        self.master_port = master_port
    def execute(self):
        # Fix: removed an unused `dburl` local that the original built here.
        old_dates = ListFiles(os.path.join(self.master_datadir, DUMP_DIR)).run()
        try:
            # Never consider today's (possibly in-progress) dump for deletion.
            old_dates.remove(DUMP_DATE)
        except ValueError:  # DUMP_DATE was not found in old_dates
            pass
        if len(old_dates) == 0:
            logger.info("No old backup sets to remove")
            return
        old_dates.sort()
        old_date = old_dates[0]
        # This will avoid the problem where we might accidently end up deleting local backup files
        logger.info("Preparing to remove dump %s from all hosts" % old_date)
        path = os.path.join(self.master_datadir, DUMP_DIR, old_date)
        try:
            RemoveTree(path).run()
        except OSError:
            logger.warn("Error encountered during deletion of %s" % path)
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port = self.master_port), utility=True)
        primaries = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
        for seg in primaries:
            path = os.path.join(seg.getSegmentDataDirectory(), DUMP_DIR, old_date)
            try:
                RemoveRemoteTree(path, seg.getSegmentHostName()).run()
            except ExecutionError:
                logger.warn("Error encountered during deletion of %s on %s" % (path, seg.getSegmentHostName()))
        return old_date
class VacuumDatabase(Operation):
    """Run VACUUM on the given database; errors are logged, not raised."""
    # TODO: move this to gppylib.operations.common?
    def __init__(self, database, master_port):
        self.database = database
        self.master_port = master_port
    def execute(self):
        conn = None
        logger.info('Commencing vacuum of %s database, please wait' % self.database)
        try:
            dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
            conn = dbconn.connect(dburl)
            cursor = conn.cursor()
            # VACUUM cannot run inside a transaction block, hence the commit first.
            cursor.execute("commit") # hack to move drop stmt out of implied transaction
            cursor.execute("vacuum")
            cursor.close()
        except Exception, e:
            logger.exception('Error encountered with vacuum of %s database' % self.database)
        else:
            logger.info('Vacuum of %s completed without error' % self.database)
        finally:
            if conn is not None:
                conn.close()
class MailDumpEvent(Operation):
    """Email a dump notification to addresses listed in a mail_contacts file.

    Looks for $HOME/mail_contacts first, then $GPHOME/bin/mail_contacts;
    silently (with warnings) does nothing when neither exists.
    """
    def __init__(self, subject, message):
        self.subject = subject
        self.message = message
    def execute(self):
        if "HOME" not in os.environ or "GPHOME" not in os.environ:
            logger.warn("Could not find mail_contacts file. Set $HOME and $GPHOME.")
            return
        mail_file = os.path.join(os.environ["GPHOME"], "bin", "mail_contacts")
        home_mail_file = os.path.join(os.environ["HOME"], "mail_contacts")
        contacts_file = None
        # Per-user file takes precedence over the installation-wide file.
        if CheckFile(home_mail_file).run():
            contacts_file = home_mail_file
        elif CheckFile(mail_file).run():
            contacts_file = mail_file
        else:
            logger.warn("Found neither %s nor %s" % (mail_file, home_mail_file))
            logger.warn("Unable to send dump email notification")
            logger.info("To enable email notification, create %s or %s containing required email addresses" % (mail_file, home_mail_file))
            return
        to_addrs = None
        # One address per line; blank lines become empty strings here.
        with open(contacts_file, 'r') as f:
            to_addrs = [line.strip() for line in f]
        MailEvent(subject = self.subject,
                  message = self.message,
                  to_addrs = to_addrs).run()
class MailEvent(Operation):
    """Send an email via the system `mail` (or `mailx` on Solaris) command."""
    # TODO: move this to gppylib.operations.common?
    def __init__(self, subject, message, to_addrs):
        # NOTE(review): isinstance(to_addrs, str) misses unicode strings on
        # Python 2 -- confirm what callers pass.
        if isinstance(to_addrs, str):
            to_addrs = [to_addrs]
        self.subject = subject
        self.message = message
        self.to_addrs = to_addrs
    def execute(self):
        logger.info("Sending mail to %s" % ",".join(self.to_addrs))
        cmd = "/bin/mailx" if curr_platform == SUNOS else findCmdInPath('mail')
        # SECURITY: subject/message/addresses are interpolated straight into a
        # shell command line; shell metacharacters in them would be executed.
        Command('Sending email',
                'echo "%s" | %s -s "%s" %s' % (self.message, cmd, self.subject, " ".join(self.to_addrs))).run(validateAfter=True)
| apache-2.0 |
sergiusens/snapcraft | tests/unit/plugins/python/test_errors.py | 3 | 1444 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from testtools.matchers import Equals
from snapcraft.plugins._python import errors
from tests import unit
class ErrorFormattingTestCase(unit.TestCase):
    """Scenario-driven check that python-plugin errors format as expected."""
    # Each scenario supplies an exception class, its constructor kwargs, and
    # the exact message str() should yield.
    scenarios = (
        (
            "PipListInvalidLegacyFormatError",
            {
                "exception": errors.PipListInvalidLegacyFormatError,
                "kwargs": {"output": "test-output"},
                "expected_message": (
                    "Failed to parse Python package list: "
                    "The returned output is not in the expected format:\n"
                    "test-output"
                ),
            },
        ),
    )
    def test_error_formatting(self):
        # Scenario attributes are injected onto self by the scenarios runner.
        self.assertThat(
            str(self.exception(**self.kwargs)), Equals(self.expected_message)
        )
| gpl-3.0 |
fastinetserver/portage-idfetch | pym/_emerge/AsynchronousTask.py | 2 | 3164 | # Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SlotObject import SlotObject
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Subclasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.
	"""

	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

	def start(self):
		"""
		Start an asynchronous task and then return as soon as possible.
		"""
		self._start_hook()
		self._start()

	def _start(self):
		# Subclasses implement the actual startup logic.
		raise NotImplementedError(self)

	def isAlive(self):
		# A task is alive until its returncode has been set.
		return self.returncode is None

	def poll(self):
		# Non-blocking check; fires exit listeners if the task has finished.
		self._wait_hook()
		return self._poll()

	def _poll(self):
		return self.returncode

	def wait(self):
		# Block until the task completes, then notify exit listeners.
		if self.returncode is None:
			self._wait()
		self._wait_hook()
		return self.returncode

	def _wait(self):
		return self.returncode

	def cancel(self):
		# Mark cancelled and wait so listeners still fire.
		self.cancelled = True
		self.wait()

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
			return
		self._start_listeners.remove(f)

	def _start_hook(self):
		# Fire start listeners exactly once; the list is consumed here.
		if self._start_listeners is not None:
			start_listeners = self._start_listeners
			self._start_listeners = None

			for f in start_listeners:
				f(self)

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		if self._exit_listeners is None:
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
			return
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
| gpl-2.0 |
bocaaust/FreshLife | django_project/env/lib/python2.7/site-packages/pip/utils/deprecation.py | 148 | 2239 | """
A module that implments tooling to enable easy warnings about deprecations.
"""
from __future__ import absolute_import
import logging
import warnings
# Root of pip's deprecation-warning hierarchy.
class PipDeprecationWarning(Warning):
    pass


# Marker mixin: the deprecation still has releases to go before removal,
# so _showwarning logs it at WARNING instead of ERROR.
class Pending(object):
    pass


# Feature removed in pip 9.
class RemovedInPip9Warning(PipDeprecationWarning):
    pass


# Feature removed in pip 10 (still pending).
class RemovedInPip10Warning(PipDeprecationWarning, Pending):
    pass


# Python 2.6 support is being phased out (still pending).
class Python26DeprecationWarning(PipDeprecationWarning, Pending):
    pass
# Warnings <-> Logging Integration
# Holds the original warnings.showwarning so _showwarning can delegate to it;
# populated by install_warning_logger().
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement that routes pip deprecation warnings
    through the logging system.

    Warnings destined for an explicit stream, and all non-pip warnings, are
    delegated to the saved original showwarning implementation.
    """
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(
                message, category, filename, lineno, file, line,
            )
        return

    if not issubclass(category, PipDeprecationWarning):
        _warnings_showwarning(
            message, category, filename, lineno, file, line,
        )
        return

    # A specially named logger handles all of pip's deprecation messages.
    deprecation_logger = logging.getLogger("pip.deprecations")

    # Interpolate here (rather than passing %-args to logging) so the record
    # reads as if someone typed the entire message out.
    log_message = "DEPRECATION: %s" % message

    # Pending deprecations have at least 2 versions to go, so a plain warning
    # suffices; everything else disappears in the very next pip version and
    # is logged at ERROR to be more obvious.
    if issubclass(category, Pending):
        deprecation_logger.warning(log_message)
    else:
        deprecation_logger.error(log_message)
def install_warning_logger():
    """Hook _showwarning into the warnings module (idempotent).

    Saves the original warnings.showwarning so _showwarning can delegate
    non-pip warnings back to it.
    """
    # Enable our Deprecation Warnings
    warnings.simplefilter("default", PipDeprecationWarning, append=True)

    global _warnings_showwarning

    if _warnings_showwarning is None:
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
| apache-2.0 |
kampanita/pelisalacarta | python/main-classic/channels/newpct.py | 5 | 19385 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para newpct
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urllib
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
    """Build the channel's top-level menu entries."""
    logger.info("[newpct.py] mainlist")
    entries = [
        Item(channel=item.channel, action="submenu", title="Películas"),
        Item(channel=item.channel, action="submenu", title="Series"),
        Item(channel=item.channel, action="listado", title="Anime", url="http://www.newpct.com/anime/", viewmode="movie_with_plot"),
        Item(channel=item.channel, action="listado", title="Documentales", url="http://www.newpct.com/documentales/", viewmode="movie_with_plot"),
        Item(channel=item.channel, action="search", title="Buscar"),
    ]
    return entries
def search(item,texto):
    """Global-search entry point: build the search URL and scrape results.

    Returns an empty list on any failure so one broken channel never
    interrupts the global search across channels.
    """
    logger.info("[newpct.py] search")
    texto = texto.replace(" ","+")
    item.url = "http://www.newpct.com/buscar-descargas/%s" % (texto)
    try:
        return buscador(item)
    # Deliberately broad: log and swallow so the global search keeps working
    # when this channel fails.
    except:
        # Fix: removed a redundant `import sys` here; sys is already imported
        # at module level.
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []
def buscador(item):
    """Parse a newpct search-results page into playable torrent Items."""
    logger.info("[newpct.py] buscador")
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # Each result row carries: date cell, size cell, then the torrent
    # download anchor whose title holds the release name and whose <img>
    # is the thumbnail.
    patron = '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?'
    patron += '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?'
    patron += '<a href="([^"]+)" '
    patron += 'title="Descargar([^"]+)">'
    patron += '<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for fecha, tamano, enlace, nombre, miniatura in matches:
        nombre = nombre + "(Tamaño:" + tamano + "--" + fecha + ")"
        itemlist.append(Item(channel=item.channel, title=nombre, url=enlace,
                             action="play", server="torrent",
                             thumbnail=miniatura, fulltitle=nombre,
                             folder=True))
    # Also pick up any directly embedded video servers on the page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
    return itemlist
def submenu(item):
    """Second-level menu: film categories or series categories."""
    logger.info("[newpct.py] submenu")
    if item.title == "Películas":
        # Every film entry uses the same action/viewmode: drive from data.
        secciones = [
            ("Peliculas DVDRIP-BRRIP Castellano",
             "http://www.newpct.com/peliculas-castellano/peliculas-rip/"),
            ("Peliculas Latino",
             "http://www.newpct.com/peliculas-latino/"),
            ("Estrenos de Cine Castellano",
             "http://www.newpct.com/peliculas-castellano/estrenos-de-cine/"),
            ("Peliculas Alta Definicion HD",
             "http://www.newpct.com/cine-alta-definicion-hd/"),
            ("Peliculas en 3D HD",
             "http://www.newpct.com/peliculas-en-3d-hd/"),
            ("Peliculas DVDFULL",
             "http://www.newpct.com/peliculas-castellano/peliculas-dvd/"),
            ("Peliculas V.O.Subtituladas",
             "http://www.newpct.com/peliculas-vo/"),
        ]
        itemlist = [Item(channel=item.channel, action="listado", title=titulo,
                         url=enlace, viewmode="movie_with_plot")
                    for titulo, enlace in secciones]
    else:
        # Series entries differ in category/action, so list them explicitly.
        itemlist = [
            Item(channel=item.channel, action="listado",
                 title="HDTV Castellano", url="http://www.newpct.com/series/",
                 category="serie", viewmode="movie_with_plot"),
            Item(channel=item.channel, action="listado",
                 title="Miniseries Castellano",
                 url="http://www.newpct.com/miniseries-es/",
                 viewmode="movie_with_plot"),
            Item(channel=item.channel, action="listado",
                 title="Series TV - V.O.S.E",
                 url="http://www.newpct.com/series-vo/", category="serie",
                 viewmode="movie_with_plot"),
            Item(channel=item.channel, action="listado",
                 title="Últimos Capítulos HD",
                 url="http://www.newpct.com/series-alta-definicion-hd/",
                 category="serie", viewmode="movie_with_plot"),
            Item(channel=item.channel, action="series",
                 title="Series HD [A-Z]",
                 url="http://www.newpct.com/index.php?l=torrentListByCategory&subcategory_s=1469&more=listar",
                 category="serie"),
        ]
    return itemlist
def listado(item):
    """List the titles of a category page and append a "next page" item.

    The site paginates through an AJAX endpoint (orderCategory.php).  The
    request parameters for the next page are scraped from the inline
    JavaScript of the current page and the scraped JS block is carried in
    ``item.extra`` so later pages can reuse it.

    :param item: current page Item (``category == "serie"`` routes entries
        to ``episodios`` instead of ``findvideos``)
    :return: list of Items, last one being the "next page" link
    """
    logger.info("[newpct.py] listado")
    itemlist = []
    data = scrapertools.cache_page(item.url)

    # One <li> per title: anchor, thumbnail, <h3> name and a description
    # block (quality / size / language) that we keep as the plot.
    patron = "<li[^<]+"
    patron += "<a href='([^']+)'[^<]+"
    patron += "<div class='boxgrid captionb'[^<]+"
    patron += "<img src='([^']+)'[^<]+"
    patron += "<div class='cover boxcaption'[^<]+"
    patron += '<h3>([^<]+)</h3>(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        # Pages are served as latin-1; re-encode to utf-8 for the GUI.
        title = scrapedtitle.strip()
        title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = scrapertools.htmlclean(scrapedplot).strip()
        plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        if item.category == "serie":
            itemlist.append(Item(channel=item.channel, action="episodios",
                                 title=title, url=url, thumbnail=thumbnail,
                                 plot=plot))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos",
                                 title=title, url=url, thumbnail=thumbnail,
                                 plot=plot))

    # ---- next page: rebuild the AJAX pagination request ----
    # The orderCategory() JS function embeds every parameter the endpoint
    # needs (type, leter, sql, pag, tot, ban, cate and the endpoint URL).
    if item.extra != "":
        bloque = item.extra
    else:
        bloque = scrapertools.get_match(data, "function orderCategory(.*?)\}\)\;")
    logger.info("bloque="+bloque)
    param_type = scrapertools.get_match(data, "<a href='javascript:;' onclick=\"orderCategory\('([^']+)'[^>]+> >> </a>")
    logger.info("param_type="+param_type)
    param_leter = scrapertools.get_match(data, "<a href='javascript:;' onclick=\"orderCategory\('[^']+','([^']*)'[^>]+> >> </a>")
    logger.info("param_leter="+param_leter)
    param_pag = scrapertools.get_match(data, "<a href='javascript:;' onclick=\"orderCategory\('[^']+','[^']*','([^']+)'[^>]+> >> </a>")
    logger.info("param_pag="+param_pag)
    param_total = scrapertools.get_match(bloque, '"total"\s*\:\s*\'([^\']+)')
    # Log label fixed: this used to print "param_sql=" for param_total.
    logger.info("param_total="+param_total)
    param_sql = scrapertools.get_match(bloque, '"sql"\s*\:\s*\'([^\']+)')
    logger.info("param_sql="+param_sql)
    param_tot = scrapertools.get_match(bloque, "\"tot\"\s*\:\s*'([^']*)'")
    logger.info("param_tot="+param_tot)
    param_ban = scrapertools.get_match(bloque, "\"ban\"\s*\:\s*'([^']+)'")
    logger.info("param_ban="+param_ban)
    param_cate = scrapertools.get_match(bloque, "\"cate\"\s*\:\s*'([^']+)'")
    logger.info("param_cate="+param_cate)
    base_url = scrapertools.get_match(bloque, "url\s*\:\s*'([^']+)'")
    # Turn a site-relative "../.." prefix into an absolute URL.  The old
    # re.sub("../..", ...) treated the dots as regex wildcards (any two
    # characters + "/" + any two characters) and could mangle an already
    # absolute URL such as "http://www.newpct.com/...", so replace the
    # literal string instead.
    base_url = base_url.replace("../..", "http://www.newpct.com", 1)
    logger.info("base_url="+base_url)
    url_next_page = base_url + "?" + urllib.urlencode( {"total": param_total, "type": param_type, "leter": param_leter, "sql": param_sql, "pag": param_pag, "tot": param_tot, "ban": param_ban, "cate": param_cate} )
    logger.info("url_next_page="+url_next_page)
    if item.category == "serie":
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Página siguiente", url=url_next_page,
                             extra=bloque, category="serie",
                             viewmode="movie_with_plot"))
    else:
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Página siguiente", url=url_next_page,
                             extra=bloque, viewmode="movie_with_plot"))
    return itemlist
def series(item):
    """Build the A-Z series index from the page's #content-abc block."""
    logger.info("[newpct.py] series")
    data = scrapertools.cache_page(item.url)
    # Isolate the alphabet bar, then pull (letter-id, label) pairs from it.
    abc_blocks = re.compile('<div id="content-abc">(.*?)<\/div>',
                            re.DOTALL | re.M).findall(data)
    letras = re.compile('id="([^"]+)".*?>([^"]+)<\/a>',
                        re.DOTALL | re.M).findall(abc_blocks[0])
    # AJAX listing URL with a %s placeholder for the letter id (hoisted out
    # of the loop; it is a constant template).
    url_base = "http://www.newpct.com/include.inc/ajax.php/orderCategory.php?total=9&type=letter&leter=%s&sql=+%09%09SELECT++t.torrentID%2C++%09%09%09%09t.torrentCategoryID%2C++%09%09%09%09t.torrentCategoryIDR%2C++%09%09%09%09t.torrentImageID%2C++%09%09%09%09t.torrentName%2C++%09%09%09%09t.guid%2C++%09%09%09%09t.torrentShortName%2C+%09%09%09%09t.torrentLanguage%2C+%09%09%09%09t.torrentSize%2C+%09%09%09%09t.calidad+as+calidad_%2C+%09%09%09%09t.torrentDescription%2C+%09%09%09%09t.torrentViews%2C+%09%09%09%09t.rating%2C+%09%09%09%09t.n_votos%2C+%09%09%09%09t.vistas_hoy%2C+%09%09%09%09t.vistas_ayer%2C+%09%09%09%09t.vistas_semana%2C+%09%09%09%09t.vistas_mes%2C+%09%09%09%09t.imagen+FROM+torrentsFiles+as+t++%09%09LEFT+JOIN+torrentsCategories+as+tc+ON+(t.torrentCategoryID+%3D+tc.categoryID)++%09%09INNER+JOIN++%09%09(+%09%09%09SELECT+torrentID+%09%09%09FROM+torrentsFiles++%09%09%09WHERE++torrentCategoryIDR+%3D+1469+%09%09%09ORDER+BY+torrentID+DESC+%09%09)t1+ON+t1.torrentID+%3D+t.torrentID+WHERE+(t.torrentStatus+%3D+1+OR+t.torrentStatus+%3D+2)+AND+t.home_active+%3D+0++AND+tc.categoryIDR+%3D+1469+GROUP+BY+t.torrentCategoryID+ORDER+BY+t.torrentID+DESC+LIMIT+0%2C+50&pag=&tot=&ban=3&cate=1469"
    itemlist = []
    for letra_id, titulo in letras:
        # "todo" (show everything) is the only entry we skip.
        if letra_id == "todo":
            continue
        itemlist.append(Item(channel=item.channel, action="listaseries",
                             title=titulo,
                             url=url_base.replace("%s", letra_id),
                             folder=True))
    return itemlist
def listaseries(item):
    """List every series under one letter of the A-Z index."""
    logger.info("[newpct.py] listaseries")
    data = scrapertools.downloadpageGzip(item.url)
    serie_re = re.compile(
        "<li[^<]+<a href='([^']+)'>.*?<img src='([^']+)'.*?<h3>([^']+)<\/h3>",
        re.DOTALL | re.M)
    itemlist = []
    for enlace, miniatura, titulo in serie_re.findall(data):
        itemlist.append(Item(channel=item.channel, action="episodios",
                             title=titulo, url=enlace, thumbnail=miniatura,
                             folder=True))
    return itemlist
def episodios(item):
    """List the episodes of a series page.

    Each season lives in a hidden ``<ul style='display:none;'>`` block;
    its ``<a>`` entries are the episodes.

    :param item: the series Item (its thumbnail is reused for episodes)
    :return: list of episode Items pointing at ``findvideos``
    """
    logger.info("[newpct.py] episodios")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    season_blocks = re.compile("<ul style='display:none;'.*?>(.*?)<\/ul>",
                               re.DOTALL | re.M).findall(data)
    # Compile once instead of recompiling inside the old
    # ``for index in range(len(data))`` loop; iterate the blocks directly.
    episode_re = re.compile("<a href='([^']+)'.*?title='([^']+)'",
                            re.DOTALL | re.M)
    for season in season_blocks:
        for scrapedurl, scrapedtitle in episode_re.findall(season):
            itemlist.append(Item(channel=item.channel, action="findvideos",
                                 title=scrapedtitle, url=scrapedurl,
                                 thumbnail=item.thumbnail, folder=True))
    return itemlist
def findvideos(item):
    """Resolve the torrent link and embedded video servers for one title."""
    logger.info("[newpct.py] findvideos")
    data = scrapertools.cache_page(item.url)
    # The torrent download anchor lives inside <span id='content-torrent'>.
    torrent_url = scrapertools.find_single_match(
        data, "<span id='content-torrent'[^<]+<a href='([^']+)'")
    itemlist = []
    if torrent_url != "":
        itemlist.append(Item(channel=item.channel, action="play",
                             title="Torrent", url=torrent_url,
                             server="torrent"))
    # Add any streaming servers detected on the page.
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    # Normalize every entry (including the torrent one) for playback; the
    # title becomes "[<server>]" as in the original implementation.
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "[" + videoitem.server + "]"
    return itemlist
| gpl-3.0 |
yusofm/scrapy | tests/test_utils_datatypes.py | 144 | 3592 | import copy
import unittest
from scrapy.utils.datatypes import CaselessDict
# Modules whose doctests scrapy's test runner should also collect.
__doctests__ = ['scrapy.utils.datatypes']
class CaselessDictTest(unittest.TestCase):
    """Behavioural tests for CaselessDict and its normkey/normvalue hooks."""

    def test_init(self):
        # A mapping used as initializer populates the dict.
        source_mapping = {'red': 1, 'black': 3}
        caseless = CaselessDict(source_mapping)
        self.assertEqual(caseless['red'], 1)
        self.assertEqual(caseless['black'], 3)
        # A sequence of (key, value) pairs works the same way.
        source_pairs = (('red', 1), ('black', 3))
        caseless = CaselessDict(source_pairs)
        self.assertEqual(caseless['red'], 1)
        self.assertEqual(caseless['black'], 3)

    def test_caseless(self):
        caseless = CaselessDict()
        caseless['key_Lower'] = 1
        # Lookups ignore the casing used when the key was stored.
        self.assertEqual(caseless['KEy_loWer'], 1)
        self.assertEqual(caseless.get('KEy_loWer'), 1)
        # Storing under different casing overwrites the same entry.
        caseless['KEY_LOWER'] = 3
        self.assertEqual(caseless['key_Lower'], 3)
        self.assertEqual(caseless.get('key_Lower'), 3)

    def test_delete(self):
        caseless = CaselessDict({'key_lower': 1})
        del caseless['key_LOWER']
        # After deletion the key is gone under any casing.
        self.assertRaises(KeyError, caseless.__getitem__, 'key_LOWER')
        self.assertRaises(KeyError, caseless.__getitem__, 'key_lower')

    def test_getdefault(self):
        caseless = CaselessDict()
        # A missing key falls back to the supplied default ...
        self.assertEqual(caseless.get('c', 5), 5)
        caseless['c'] = 10
        # ... while a present key wins over the default.
        self.assertEqual(caseless.get('c', 5), 10)

    def test_setdefault(self):
        caseless = CaselessDict({'a': 1, 'b': 2})
        # setdefault on an existing (differently cased) key returns the
        # stored value and leaves the entry untouched.
        existing = caseless.setdefault('A', 5)
        self.assertEqual(existing, 1)
        self.assertEqual(caseless['A'], 1)
        # setdefault on a missing key inserts and returns the default.
        inserted = caseless.setdefault('c', 5)
        self.assertEqual(inserted, 5)
        self.assertEqual(caseless['C'], 5)

    def test_fromkeys(self):
        key_tuple = ('a', 'b')
        # Classmethod form, without and with an explicit value.
        caseless = CaselessDict.fromkeys(key_tuple)
        self.assertEqual(caseless['A'], None)
        self.assertEqual(caseless['B'], None)
        caseless = CaselessDict.fromkeys(key_tuple, 1)
        self.assertEqual(caseless['A'], 1)
        self.assertEqual(caseless['B'], 1)
        # Calling fromkeys through an instance behaves identically.
        instance = CaselessDict()
        caseless = instance.fromkeys(key_tuple)
        self.assertEqual(caseless['A'], None)
        self.assertEqual(caseless['B'], None)
        caseless = instance.fromkeys(key_tuple, 1)
        self.assertEqual(caseless['A'], 1)
        self.assertEqual(caseless['B'], 1)

    def test_contains(self):
        caseless = CaselessDict()
        caseless['a'] = 1
        assert 'a' in caseless

    def test_pop(self):
        caseless = CaselessDict()
        caseless['a'] = 1
        # pop honours case-insensitivity and removes the entry.
        self.assertEqual(caseless.pop('A'), 1)
        self.assertRaises(KeyError, caseless.pop, 'A')

    def test_normkey(self):
        class TitleKeyDict(CaselessDict):
            # Override the key normalizer: keys are stored title-cased.
            def normkey(self, key):
                return key.title()

        titled = TitleKeyDict()
        titled['key-one'] = 2
        self.assertEqual(list(titled.keys()), ['Key-One'])

    def test_normvalue(self):
        class PlusOneDict(CaselessDict):
            # Override the value normalizer: every stored value gets +1.
            def normvalue(self, value):
                if value is not None:
                    return value + 1

        # The hook must apply through every write path.
        plus_one = PlusOneDict({'key': 1})            # initializer
        self.assertEqual(plus_one['key'], 2)
        self.assertEqual(plus_one.get('key'), 2)

        plus_one = PlusOneDict()
        plus_one['key'] = 1                           # __setitem__
        self.assertEqual(plus_one['key'], 2)
        self.assertEqual(plus_one.get('key'), 2)

        plus_one = PlusOneDict()
        plus_one.setdefault('key', 1)                 # setdefault
        self.assertEqual(plus_one['key'], 2)
        self.assertEqual(plus_one.get('key'), 2)

        plus_one = PlusOneDict()
        plus_one.update({'key': 1})                   # update
        self.assertEqual(plus_one['key'], 2)
        self.assertEqual(plus_one.get('key'), 2)

        plus_one = PlusOneDict.fromkeys(('key',), 1)  # fromkeys
        self.assertEqual(plus_one['key'], 2)
        self.assertEqual(plus_one.get('key'), 2)

    def test_copy(self):
        original = CaselessDict({'header1': 'value'})
        cloned = copy.copy(original)
        # A shallow copy compares equal, shares values and keeps the type.
        self.assertEqual(original, cloned)
        self.assertEqual(original.get('header1'), cloned.get('header1'))
        assert isinstance(cloned, CaselessDict)
if __name__ == "__main__":
    # Allow running this test module directly, outside the test runner.
    unittest.main()
| bsd-3-clause |
jalr/privacyidea | privacyidea/lib/tokens/smstoken.py | 1 | 16278 | # -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# 2015-05-24 Add more detailed description
# Cornelius Kölbel <cornelius.koelbel@netknights.it>
# 2015-01-30 Adapt for migration to flask
# Cornelius Kölbel <cornelius@privacyidea.org>
#
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: LSE
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """The SMS token sends an SMS containing an OTP via some kind of
gateway. The gateways can be an SMTP or HTTP gateway or the special sipgate
protocol.
The Gateways are defined in the SMSProvider Modules.
This code is tested in tests/test_lib_tokens_sms
"""
import datetime
import traceback
from privacyidea.api.lib.utils import getParam
from privacyidea.api.lib.utils import required
from privacyidea.lib.config import get_from_config
from privacyidea.lib.policy import SCOPE
from privacyidea.lib.log import log_with
from privacyidea.lib.smsprovider.SMSProvider import get_sms_provider_class
from json import loads
from gettext import gettext as _
from privacyidea.lib.tokens.hotptoken import HotpTokenClass
from privacyidea.models import Challenge
from privacyidea.lib.decorators import check_token_locked
import logging
from privacyidea.lib.policydecorators import challenge_response_allowed
log = logging.getLogger(__name__)
# Digest name -> OTP secret key length in bytes.  Not referenced within
# this module; presumably consumed by importers -- TODO confirm.
keylen = {
    "sha1": 20,
    "sha256": 32,
    "sha512": 64,
}
class SMSACTION(object):
    """Policy action names used by the SMS token type."""
    # Automatically send a fresh OTP after a successful authentication.
    SMSAUTO = "smsautosend"
    # Template for the SMS message text (supports <otp> and <serial>).
    SMSTEXT = "smstext"
class SmsTokenClass(HotpTokenClass):
    """
    The SMS token sends an SMS containing an OTP via some kind of
    gateway. The gateways can be an SMTP or HTTP gateway or the special sipgate
    protocol. The Gateways are defined in the SMSProvider Modules.
    The SMS token is a challenge response token. I.e. the first request needs
    to contain the correct OTP PIN. If the OTP PIN is correct, the sending of
    the SMS is triggered. The second authentication must either contain the
    OTP PIN and the OTP value or the transaction_id and the OTP value.
    **Example 1st Authentication Request**:
    .. sourcecode:: http
       POST /validate/check HTTP/1.1
       Host: example.com
       Accept: application/json
       user=cornelius
       pass=otppin
    **Example 1st response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       Content-Type: application/json
       {
         "detail": {
           "transaction_id": "xyz"
         },
         "id": 1,
         "jsonrpc": "2.0",
         "result": {
           "status": true,
           "value": false
         },
         "version": "privacyIDEA unknown"
       }
    After this, the SMS is triggered. When the SMS is received the second part
    of authentication looks like this:
    **Example 2nd Authentication Request**:
    .. sourcecode:: http
       POST /validate/check HTTP/1.1
       Host: example.com
       Accept: application/json
       user=cornelius
       transaction_id=xyz
       pass=otppin
    **Example 2nd response**:
    .. sourcecode:: http
       HTTP/1.1 200 OK
       Content-Type: application/json
       {
         "detail": {
         },
         "id": 1,
         "jsonrpc": "2.0",
         "result": {
           "status": true,
           "value": true
         },
         "version": "privacyIDEA unknown"
       }
    """
    def __init__(self, db_token):
        """Wrap an existing DB token row as an SMS token."""
        HotpTokenClass.__init__(self, db_token)
        self.set_type(u"sms")
        # Challenge/response only: the OTP is delivered out of band.
        self.mode = ['challenge']
        # The server needs the OTP secret itself to compute the value it
        # sends via SMS (one is generated in update() if none is supplied).
        self.hKeyRequired = True
    @staticmethod
    def get_class_type():
        """
        return the generic token class identifier
        """
        return "sms"
    @staticmethod
    def get_class_prefix():
        """Return the class prefix ("PISM") for this token type."""
        return "PISM"
    @staticmethod
    def get_class_info(key=None, ret='all'):
        """
        returns all or a subtree of the token definition
        :param key: subsection identifier
        :type key: string
        :param ret: default return value, if nothing is found
        :type ret: user defined
        :return: subsection if key exists or user defined
        :rtype : s.o.
        """
        res = {'type': 'sms',
               'title': _('SMS Token'),
               'description':
                   _('SMS: Send a One Time Password to the users mobile '
                     'phone.'),
               'user': ['enroll'],
               # This tokentype is enrollable in the UI for...
               'ui_enroll': ["admin", "user"],
               'policy': {
                   SCOPE.AUTH: {
                       SMSACTION.SMSTEXT: {
                           'type': 'str',
                           'desc': _('The text that will be send via SMS for'
                                     ' an SMS token. Use <otp> and <serial> '
                                     'as parameters.')},
                       SMSACTION.SMSAUTO: {
                           'type': 'bool',
                           'desc': _('If set, a new SMS OTP will be sent '
                                     'after successful authentication with '
                                     'one SMS OTP.')},
                   }
               },
        }
        # Either return the requested subsection or the whole definition.
        if key is not None and key in res:
            ret = res.get(key)
        else:
            if ret == 'all':
                ret = res
        return ret
    @log_with(log)
    def update(self, param, reset_failcount=True):
        """
        process initialization parameters
        :param param: dict of initialization parameters; must contain
            "phone" (the destination mobile number)
        :type param: dict
        :return: nothing
        """
        # specific - phone
        phone = getParam(param, "phone", required)
        self.add_tokeninfo("phone", phone)
        # in case of the sms token, only the server must know the otpkey
        # thus if none is provided, we let create one (in the TokenClass)
        if "genkey" not in param and "otpkey" not in param:
            param['genkey'] = 1
        HotpTokenClass.update(self, param, reset_failcount)
    @log_with(log)
    def is_challenge_request(self, passw, user=None, options=None):
        """
        check, if the request would start a challenge
        We need to define the function again, to get rid of the
        is_challenge_request-decorator of the HOTP-Token
        :param passw: password, which might be pin or pin+otp
        :param options: dictionary of additional request parameters
        :return: returns true or false
        """
        # A correct OTP PIN alone triggers the challenge (SMS sending).
        return self.check_pin(passw, user=user, options=options)
    @log_with(log)
    def create_challenge(self, transactionid=None, options=None):
        """
        create a challenge, which is submitted to the user
        :param transactionid: the id of this challenge
        :param options: the request context parameters / data
        :return: tuple of (bool, message and data)
                 bool, if submit was successful
                 message is submitted to the user
                 data is preserved in the challenge
                 attributes - additional attributes, which are displayed in the
                 output
        """
        success = False
        sms = ""
        options = options or {}
        return_message = "Enter the OTP from the SMS:"
        attributes = {'state': transactionid}
        validity = self._get_sms_timeout()
        if self.is_active() is True:
            counter = self.get_otp_count()
            log.debug("counter={0!r}".format(counter))
            # Advance the OTP counter before sending, so the value in the
            # SMS is the one the follow-up check will expect.
            self.inc_otp_counter(counter, reset=False)
            # At this point we must not bail out in case of an
            # Gateway error, since checkPIN is successful. A bail
            # out would cancel the checking of the other tokens
            try:
                message_template = self._get_sms_text(options)
                success, sent_message = self._send_sms(
                    message=message_template)
                # Create the challenge in the database
                db_challenge = Challenge(self.token.serial,
                                         transaction_id=transactionid,
                                         challenge=options.get("challenge"),
                                         session=options.get("session"),
                                         validitytime=validity)
                db_challenge.save()
                # Fall back to the DB-generated id if none was passed in.
                transactionid = transactionid or db_challenge.transaction_id
            except Exception as e:
                # Gateway failure: report it in the message but keep going
                # (success stays False); see the comment above.
                info = ("The PIN was correct, but the "
                        "SMS could not be sent: %r" % e)
                log.warning(info)
                return_message = info
        # Re-read the timeout before computing the displayed expiry time.
        validity = self._get_sms_timeout()
        expiry_date = datetime.datetime.now() + \
                                    datetime.timedelta(seconds=validity)
        attributes['valid_until'] = "{0!s}".format(expiry_date)
        return success, return_message, transactionid, attributes
    @log_with(log)
    @check_token_locked
    def check_otp(self, anOtpVal, counter=None, window=None, options=None):
        """
        check the otpval of a token against a given counter
        and the window
        :param anOtpVal: the to be verified OTP value
        :type anOtpVal: string
        :return: counter if found, -1 if not found
        :rtype: int
        """
        options = options or {}
        ret = HotpTokenClass.check_otp(self, anOtpVal, counter, window, options)
        # On success, optionally send the next OTP right away (AUTOSMS).
        if ret >= 0 and self._get_auto_sms(options):
            message = self._get_sms_text(options)
            self.inc_otp_counter(ret, reset=False)
            success, message = self._send_sms(message=message)
            log.debug("AutoSMS: send new SMS: {0!s}".format(success))
            log.debug("AutoSMS: {0!s}".format(message))
        return ret
    @log_with(log)
    def _send_sms(self, message="<otp>"):
        """
        send sms
        :param message: the sms submit message - could contain placeholders
            like <otp> or <serial>
        :type message: string
        :return: tuple of (provider submit result, submitted message)
        :rtype: tuple
        """
        ret = None
        phone = self.get_tokeninfo("phone")
        otp = self.get_otp()[2]
        serial = self.get_serial()
        # Expand the template placeholders with the actual values.
        message = message.replace("<otp>", otp)
        message = message.replace("<serial>", serial)
        log.debug("sending SMS to phone number {0!s} ".format(phone))
        (SMSProvider, SMSProviderClass) = self._get_sms_provider()
        log.debug("smsprovider: {0!s}, class: {1!s}".format(SMSProvider,
                                                            SMSProviderClass))
        try:
            sms = get_sms_provider_class(SMSProvider, SMSProviderClass)()
        except Exception as exc:
            log.error("Failed to load SMSProvider: {0!r}".format(exc))
            log.debug("{0!s}".format(traceback.format_exc()))
            raise exc
        try:
            # now we need the config from the env
            log.debug("loading SMS configuration for class {0!s}".format(sms))
            config = self._get_sms_provider_config()
            log.debug("config: {0!r}".format(config))
            sms.load_config(config)
        except Exception as exc:
            log.error("Failed to load sms.providerConfig: {0!r}".format(exc))
            log.debug("{0!s}".format(traceback.format_exc()))
            raise Exception("Failed to load sms.providerConfig: {0!r}".format(exc))
        log.debug("submitMessage: {0!r}, to phone {1!r}".format(message, phone))
        ret = sms.submit_message(phone, message)
        return ret, message
    @staticmethod
    @log_with(log)
    def _get_sms_provider():
        """
        get the SMS Provider class definition
        :return: tuple of SMSProvider module path and Provider class name
        :rtype: tuple of (string, string)
        """
        smsProvider = get_from_config("sms.provider",
                                      default="privacyidea.lib.smsprovider."
                                              "HttpSMSProvider.HttpSMSProvider")
        # Split "pkg.module.Class" into module path and class name.
        (SMSProvider, SMSProviderClass) = smsProvider.rsplit(".", 1)
        return SMSProvider, SMSProviderClass
    @staticmethod
    @log_with(log)
    def _get_sms_provider_config():
        """
        load the defined sms provider config definition
        :return: dict of the sms provider definition (parsed from the JSON
            stored in "sms.providerConfig")
        :rtype: dict
        """
        tConfig = get_from_config("sms.providerConfig", "{}")
        config = loads(tConfig)
        return config
    @staticmethod
    @log_with(log)
    def _get_sms_timeout():
        """
        get the challenge time is in the specified range
        :return: the defined validation timeout in seconds (default 300)
        :rtype: int
        """
        try:
            timeout = int(get_from_config("sms.providerTimeout", 5 * 60))
        except Exception as ex:  # pragma: no cover
            # Non-numeric config value: fall back to the 5 minute default.
            log.warning("SMSProviderTimeout: value error {0!r} - reset to 5*60".format((ex)))
            timeout = 5 * 60
        return timeout
    @staticmethod
    def _get_sms_text(options):
        """
        This returns the SMSTEXT from the policy "smstext"
        :param options: contains user and g object.
        :type options: dict
        :return: Message template (defaults to "<otp>")
        :rtype: basestring
        """
        message = "<otp>"
        g = options.get("g")
        username = None
        realm = None
        user_object = options.get("user")
        if user_object:
            username = user_object.login
            realm = user_object.realm
        if g:
            clientip = options.get("clientip")
            policy_object = g.policy_object
            messages = policy_object.\
                get_action_values(action=SMSACTION.SMSTEXT,
                                  scope=SCOPE.AUTH,
                                  realm=realm,
                                  user=username,
                                  client=clientip,
                                  unique=True,
                                  allow_white_space_in_action=True)
            # Use the policy text only if exactly one unique value matched.
            if len(messages) == 1:
                message = messages[0]
        return message
    @staticmethod
    def _get_auto_sms(options):
        """
        This returns the AUTOSMS setting.
        :param options: contains user and g object.
        :type options: dict
        :return: True if an SMS should be sent automatically
        :rtype: bool
        """
        autosms = False
        g = options.get("g")
        user_object = options.get("user")
        username = None
        realm = None
        if user_object:
            username = user_object.login
            realm = user_object.realm
        if g:
            clientip = options.get("clientip")
            policy_object = g.policy_object
            autosmspol = policy_object.\
                get_policies(action=SMSACTION.SMSAUTO,
                             scope=SCOPE.AUTH,
                             realm=realm,
                             user=username,
                             client=clientip, active=True)
            # Any matching active policy enables auto-send.
            autosms = len(autosmspol) >= 1
        return autosms
| agpl-3.0 |
GbalsaC/bitnamiP | venv/src/django-pipeline/tests/tests/compiler.py | 4 | 1182 | from django.test import TestCase
from pipeline.conf import settings
from pipeline.compilers import Compiler, CompilerBase
class DummyCompiler(CompilerBase):
    """Stub compiler for CompilerTest: claims *.coffee sources, outputs .js.

    compile_file is a deliberate no-op -- the tests below only exercise
    path rewriting and compiler discovery, never real compilation.
    """
    output_extension = 'js'
    def match_file(self, path):
        # Claim only CoffeeScript sources.
        return path.endswith('.coffee')
    def compile_file(self, infile, outfile, outdated=False, force=False):
        return
class CompilerTest(TestCase):
    """Exercise pipeline's Compiler with DummyCompiler registered."""
    def setUp(self):
        self.compiler = Compiler()
        # Point PIPELINE_COMPILERS at the dummy and remember the previous
        # value so tearDown can restore the global settings.
        self.old_compilers = settings.PIPELINE_COMPILERS
        settings.PIPELINE_COMPILERS = ['tests.tests.compiler.DummyCompiler']
    def test_output_path(self):
        # The matched extension is rewritten to the compiler's output one.
        # (assertEquals is a deprecated alias, removed in Python 3.12;
        # use assertEqual.)
        output_path = self.compiler.output_path("js/helpers.coffee", "js")
        self.assertEqual(output_path, "js/helpers.js")
    def test_compilers_class(self):
        # The dotted path in settings resolves to the actual class.
        compilers_class = self.compiler.compilers
        self.assertEqual(compilers_class[0], DummyCompiler)
    def test_compile(self):
        # Matched files get their extension rewritten; others pass through.
        paths = self.compiler.compile([
            'js/dummy.coffee',
            'js/application.js',
        ])
        self.assertEqual(['js/dummy.js', 'js/application.js'], paths)
    def tearDown(self):
        settings.PIPELINE_COMPILERS = self.old_compilers
| agpl-3.0 |
sanyaade-teachings/oppia | core/domain/rights_manager_test.py | 29 | 14025 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes and methods relating to user rights."""
__author__ = 'Sean Lip'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
import test_utils
class ExplorationRightsTests(test_utils.GenericTestBase):
    """Test that rights for actions on explorations work as expected."""
    def setUp(self):
        """Register five ordinary users (A-E) plus one admin."""
        super(ExplorationRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.signup('c@example.com', 'C')
        self.signup('d@example.com', 'D')
        self.signup('e@example.com', 'E')
        self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        self.user_id_c = self.get_user_id_from_email('c@example.com')
        self.user_id_d = self.get_user_id_from_email('d@example.com')
        self.user_id_e = self.get_user_id_from_email('e@example.com')
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_EMAIL])
        self.EXP_ID = 'exp_id'
    def test_demo_exploration(self):
        """Anyone can play/view/edit a demo exploration; only admins delete."""
        exp_services.load_demo('1')
        self.assertTrue(rights_manager.Actor(self.user_id_a).can_play('1'))
        self.assertTrue(rights_manager.Actor(self.user_id_a).can_view('1'))
        self.assertTrue(rights_manager.Actor(self.user_id_a).can_edit('1'))
        self.assertFalse(rights_manager.Actor(self.user_id_a).can_delete('1'))
        self.assertTrue(rights_manager.Actor(self.user_id_admin).can_play('1'))
        self.assertTrue(rights_manager.Actor(self.user_id_admin).can_view('1'))
        self.assertTrue(rights_manager.Actor(self.user_id_admin).can_edit('1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_delete('1'))
    def test_non_splash_page_demo_exploration(self):
        """Demo permissions are the same on and off the splash page."""
        # Note: there is no difference between permissions for demo
        # explorations, whether or not they are on the splash page.
        exp_services.load_demo('3')
        self.assertTrue(rights_manager.Actor(self.user_id_a).can_play('3'))
        self.assertTrue(rights_manager.Actor(self.user_id_a).can_view('3'))
        self.assertTrue(rights_manager.Actor(self.user_id_a).can_edit('3'))
        self.assertFalse(rights_manager.Actor(self.user_id_a).can_delete('3'))
        self.assertTrue(rights_manager.Actor(self.user_id_admin).can_play('3'))
        self.assertTrue(rights_manager.Actor(self.user_id_admin).can_view('3'))
        self.assertTrue(rights_manager.Actor(self.user_id_admin).can_edit('3'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_delete('3'))
    def test_ownership(self):
        """Only the creator is the owner; editors and admins are not."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).is_owner(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).is_owner(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).is_owner(self.EXP_ID))
    def test_newly_created_exploration(self):
        """A fresh private exploration is fully accessible to its creator,
        play/view-able by admins, and invisible to other users."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_edit(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_play(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_view(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).can_edit(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).can_delete(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
    def test_inviting_collaborator(self):
        """An invited editor can play/view/edit but not delete."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
    def test_inviting_playtester(self):
        """An invited viewer gains play/view access only."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        # Before the invitation, user B has no access at all.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
        rights_manager.assign_role(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_VIEWER)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(self.EXP_ID))
    def test_setting_rights(self):
        """Only owners may assign roles; viewers and editors may not."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_VIEWER)
        # A viewer cannot grant roles.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role(
                self.user_id_b, self.EXP_ID, self.user_id_c,
                rights_manager.ROLE_VIEWER)
        rights_manager.assign_role(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        # An editor cannot grant roles either.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role(
                self.user_id_b, self.EXP_ID, self.user_id_c,
                rights_manager.ROLE_VIEWER)
        rights_manager.assign_role(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_OWNER)
        # Once promoted to owner, B can assign any role.
        rights_manager.assign_role(
            self.user_id_b, self.EXP_ID, self.user_id_c,
            rights_manager.ROLE_OWNER)
        rights_manager.assign_role(
            self.user_id_b, self.EXP_ID, self.user_id_d,
            rights_manager.ROLE_EDITOR)
        rights_manager.assign_role(
            self.user_id_b, self.EXP_ID, self.user_id_e,
            rights_manager.ROLE_VIEWER)
    def test_publishing_and_unpublishing_exploration(self):
        """Publishing opens play/view to everyone; only admins unpublish."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        # The owner cannot unpublish their own published exploration.
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_unpublish(self.EXP_ID))
        rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
    def test_cannot_delete_published_exploration(self):
        """Even the owner cannot delete while the exploration is public."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_delete(self.EXP_ID))
    def test_can_unpublish_and_delete_published_exploration(self):
        """After an admin unpublishes, the owner may delete again."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(self.EXP_ID))
    def test_cannot_unpublish_exploration_after_edited(self):
        """Placeholder: scenario not yet implemented."""
        # User A creates an exploration, marks it private.
        # User A publishes the exploration.
        # User B submits a change.
        # User A cannot unpublish the exploration.
        pass
    def test_anyone_can_submit_a_fix(self):
        """Placeholder: scenario not yet implemented."""
        # User A creates an exploration, marks it private.
        # User A submits a change.
        # User B submits a change.
        pass
    def test_can_publicize_exploration(self):
        """Only admins can publicize a published exploration."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_publicize(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_publicize(
                self.EXP_ID))
    def test_changing_viewability(self):
        """Private viewability can be toggled by owner/admin while private,
        grants view access to everyone when on, and is locked once public."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        self.assertTrue(rights_manager.Actor(
            self.user_id_a).can_change_private_viewability(self.EXP_ID))
        self.assertFalse(rights_manager.Actor(
            self.user_id_b).can_change_private_viewability(self.EXP_ID))
        self.assertTrue(rights_manager.Actor(
            self.user_id_admin).can_change_private_viewability(self.EXP_ID))
        # Setting the value it already has is rejected.
        with self.assertRaisesRegexp(Exception, 'already the current value'):
            rights_manager.set_private_viewability(
                self.user_id_a, self.EXP_ID, False)
        # A non-owner cannot change it at all.
        with self.assertRaisesRegexp(Exception, 'cannot be changed'):
            rights_manager.set_private_viewability(
                self.user_id_b, self.EXP_ID, True)
        rights_manager.set_private_viewability(
            self.user_id_a, self.EXP_ID, True)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        rights_manager.set_private_viewability(
            self.user_id_a, self.EXP_ID, False)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(self.EXP_ID))
        # Publishing freezes the setting; unpublishing restores control.
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertFalse(rights_manager.Actor(
            self.user_id_a).can_change_private_viewability(self.EXP_ID))
        rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
        self.assertTrue(rights_manager.Actor(
            self.user_id_a).can_change_private_viewability(self.EXP_ID))
        self.assertFalse(rights_manager.Actor(
            self.user_id_b).can_change_private_viewability(self.EXP_ID))
        self.assertTrue(rights_manager.Actor(
            self.user_id_admin).can_change_private_viewability(self.EXP_ID))
| apache-2.0 |
megaserg/pants | tests/python/pants_test/goal/test_run_tracker.py | 14 | 1665 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import BaseHTTPServer
import json
import threading
import urlparse
from pants.goal.run_tracker import RunTracker
from pants_test.base_test import BaseTest
class RunTrackerTest(BaseTest):
  def test_upload_stats(self):
    """Round-trip stats through a local HTTP server via RunTracker.post_stats."""
    stats = {'stats': {'foo': 'bar', 'baz': 42}}
    class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
      # NOTE: the first parameter is deliberately named `handler` (not
      # `self`) so that `self` inside this method still refers to the
      # enclosing RunTrackerTest instance via the closure, allowing the
      # use of assert* methods here.
      def do_POST(handler):
        try:
          self.assertEquals('/upload', handler.path)
          self.assertEquals('application/x-www-form-urlencoded', handler.headers['Content-type'])
          length = int(handler.headers['Content-Length'])
          # The POST body is form-encoded with JSON-serialized values.
          post_data = urlparse.parse_qs(handler.rfile.read(length).decode('utf-8'))
          decoded_post_data = {k: json.loads(v[0]) for k, v in post_data.items()}
          self.assertEquals(stats, decoded_post_data)
          handler.send_response(200)
        except Exception:
          handler.send_response(400)  # Ensure the main thread knows the test failed.
          raise
    # Port 0 lets the OS pick a free port; read it back from server_address.
    server_address = ('', 0)
    server = BaseHTTPServer.HTTPServer(server_address, Handler)
    host, port = server.server_address
    # Serve from a daemon thread so a failure cannot hang the test run.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    self.assertTrue(RunTracker.post_stats('http://{}:{}/upload'.format(host, port), stats))
    server.shutdown()
    server.server_close()
| apache-2.0 |
nhicher/ansible | lib/ansible/modules/storage/netapp/na_elementsw_ldap.py | 9 | 8686 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
# User-facing module documentation. Fixed several defects in the published
# text: the `state` description referred to a "volume" (copy-paste from a
# volume module), "dully"/"LDAp" typos, and "used form" -> "used to form".
DOCUMENTATION = '''
module: na_elementsw_ldap
short_description: NetApp Element Software Manage ldap admin users
extends_documentation_fragment:
    - netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Enable, disable ldap, and add ldap users
options:
    state:
        description:
        - Whether the LDAP configuration should exist or not.
        required: true
        choices: ['present', 'absent']
    authType:
        description:
        - Identifies which user authentication method to use.
        choices: ['DirectBind', 'SearchAndBind']
    groupSearchBaseDn:
        description:
        - The base DN of the tree to start the group search (will do a subtree search from here)
    groupSearchType:
        description:
        - Controls the default group search filter used
        choices: ['NoGroup', 'ActiveDirectory', 'MemberDN']
    serverURIs:
        description:
        - A comma-separated list of LDAP server URIs
    userSearchBaseDN:
        description:
        - The base DN of the tree to start the search (will do a subtree search from here)
    searchBindDN:
        description:
        - A fully qualified DN to log in with to perform an LDAP search for the user (needs read access to the LDAP directory).
    searchBindPassword:
        description:
        - The password for the searchBindDN account used for searching
    userSearchFilter:
        description:
        - The LDAP filter to use
    userDNTemplate:
        description:
        - A string that is used to form a fully qualified user DN.
    groupSearchCustomFilter:
        description:
        - For use with the CustomFilter Search type
'''

EXAMPLES = """
    - name: disable ldap authentication
      na_elementsw_ldap:
        state: absent
        username: "{{ admin username }}"
        password: "{{ admin password }}"
        hostname: "{{ hostname }}"

    - name: Enable ldap authentication
      na_elementsw_ldap:
        state: present
        username: "{{ admin username }}"
        password: "{{ admin password }}"
        hostname: "{{ hostname }}"
        authType: DirectBind
        serverURIs: ldap://svmdurlabesx01spd_ldapclnt
        groupSearchType: MemberDN
        userDNTemplate:  uid=%USERNAME%,cn=users,cn=accounts,dc=corp,dc="{{ company name }}",dc=com


"""

RETURN = """

"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
# Detect whether the SolidFire SDK is importable; the module reports a clean
# failure later (in NetappElementLdap.__init__) when it is not.
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
    import solidfire.common
except ImportError:
    # Catch only ImportError: the original bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt and would hide unrelated SDK bugs.
    HAS_SF_SDK = False
class NetappElementLdap(object):
    """Enable, disable or reconfigure LDAP authentication on a NetApp
    Element Software (SolidFire) cluster, driven by Ansible parameters.
    """

    def __init__(self):
        """Parse module parameters and open a connection to the cluster.

        Fails the module immediately if the SolidFire Python SDK is not
        installed.
        """
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            authType=dict(required=False, choices=['DirectBind', 'SearchAndBind']),
            groupSearchBaseDn=dict(required=False, type=str),
            groupSearchType=dict(required=False, choices=['NoGroup', 'ActiveDirectory', 'MemberDN']),
            serverURIs=dict(required=False, type=str),
            userSearchBaseDN=dict(required=False, type=str),
            searchBindDN=dict(required=False, type=str),
            searchBindPassword=dict(required=False, type=str, no_log=True),
            userSearchFilter=dict(required=False, type=str),
            userDNTemplate=dict(required=False, type=str),
            groupSearchCustomFilter=dict(required=False, type=str)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        param = self.module.params

        # set up state variables
        self.state = param['state']
        self.authType = param['authType']
        self.groupSearchBaseDn = param['groupSearchBaseDn']
        self.groupSearchType = param['groupSearchType']
        self.serverURIs = param['serverURIs']
        # The API expects a list of URIs; the module accepts them as one
        # comma-separated string.
        if self.serverURIs is not None:
            self.serverURIs = self.serverURIs.split(',')
        self.userSearchBaseDN = param['userSearchBaseDN']
        self.searchBindDN = param['searchBindDN']
        self.searchBindPassword = param['searchBindPassword']
        self.userSearchFilter = param['userSearchFilter']
        self.userDNTemplate = param['userDNTemplate']
        self.groupSearchCustomFilter = param['groupSearchCustomFilter']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

    def get_ldap_configuration(self):
        """
        Return ldap configuration if found

        :return: Details about the ldap configuration. None if not found.
        :rtype: solidfire.models.GetLdapConfigurationResult
        """
        ldap_config = self.sfe.get_ldap_configuration()
        return ldap_config

    def enable_ldap(self):
        """
        Enable (or reconfigure) LDAP authentication on the cluster.

        :return: nothing
        """
        try:
            self.sfe.enable_ldap_authentication(self.serverURIs, auth_type=self.authType,
                                                group_search_base_dn=self.groupSearchBaseDn,
                                                group_search_type=self.groupSearchType,
                                                group_search_custom_filter=self.groupSearchCustomFilter,
                                                search_bind_dn=self.searchBindDN,
                                                search_bind_password=self.searchBindPassword,
                                                user_search_base_dn=self.userSearchBaseDN,
                                                user_search_filter=self.userSearchFilter,
                                                user_dntemplate=self.userDNTemplate)
        except solidfire.common.ApiServerError as error:
            # Bug fix: the original message interpolated self.account_id, an
            # attribute this class never sets, so any API error raised an
            # AttributeError that masked the real failure.
            self.module.fail_json(msg='Error enabling LDAP: %s' % to_native(error),
                                  exception=traceback.format_exc())

    def check_config(self, ldap_config):
        """
        Check to see if the ldap config has been modified.

        :param ldap_config: The LDAP configuration
        :return: False if the config is the same as the playbook, True if it is not
        """
        if self.authType != ldap_config.ldap_configuration.auth_type:
            return True
        if self.serverURIs != ldap_config.ldap_configuration.server_uris:
            return True
        if self.groupSearchBaseDn != ldap_config.ldap_configuration.group_search_base_dn:
            return True
        if self.groupSearchType != ldap_config.ldap_configuration.group_search_type:
            return True
        if self.groupSearchCustomFilter != ldap_config.ldap_configuration.group_search_custom_filter:
            return True
        if self.searchBindDN != ldap_config.ldap_configuration.search_bind_dn:
            return True
        if self.searchBindPassword != ldap_config.ldap_configuration.search_bind_password:
            return True
        if self.userSearchBaseDN != ldap_config.ldap_configuration.user_search_base_dn:
            return True
        if self.userSearchFilter != ldap_config.ldap_configuration.user_search_filter:
            return True
        if self.userDNTemplate != ldap_config.ldap_configuration.user_dntemplate:
            return True
        return False

    def apply(self):
        """Determine whether a change is needed and, unless in check mode,
        enable or disable LDAP accordingly."""
        changed = False
        ldap_config = self.get_ldap_configuration()
        if self.state == 'absent':
            if ldap_config and ldap_config.ldap_configuration.enabled:
                changed = True
        # NOTE(review): for state=present only the field values are compared;
        # the `enabled` flag itself is not consulted here — confirm that a
        # disabled config never reports all fields equal to the request.
        if self.state == 'present' and self.check_config(ldap_config):
            changed = True
        if changed:
            if self.module.check_mode:
                # Check mode: report the pending change without applying it.
                pass
            else:
                if self.state == 'present':
                    self.enable_ldap()
                elif self.state == 'absent':
                    self.sfe.disable_ldap_authentication()

        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the LDAP handler and apply the requested state."""
    ldap_module = NetappElementLdap()
    ldap_module.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
mrkm4ntr/incubator-airflow | airflow/api/common/experimental/get_task_instance.py | 4 | 1610 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task Instance APIs."""
from datetime import datetime
from airflow.api.common.experimental import check_and_get_dag, check_and_get_dagrun
from airflow.exceptions import TaskInstanceNotFound
from airflow.models import TaskInstance
def get_task_instance(dag_id: str, task_id: str, execution_date: datetime) -> TaskInstance:
    """Return the task instance identified by dag_id, task_id and execution_date.

    Validates the DAG and the DAG run first, then looks the task instance up
    on the run. Raises TaskInstanceNotFound when the run has no such instance.
    """
    dag = check_and_get_dag(dag_id, task_id)
    dagrun = check_and_get_dagrun(dag=dag, execution_date=execution_date)

    # A DAG run can exist without an instance for this task (e.g. the task
    # was added to the DAG after the run was created).
    task_instance = dagrun.get_task_instance(task_id)
    if not task_instance:
        raise TaskInstanceNotFound(
            f'Task {task_id} instance for date {execution_date} not found')
    return task_instance
| apache-2.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-256-transformer/dataset_preproc/data_generators/gene_expression.py | 7 | 9765 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gene expression problems.
Inputs are bases ACTG (with indices assigned in that order).
Requires the h5py library.
File format expected:
* h5 file
* h5 datasets should include {train, valid, test}_{in, na, out}, which will
map to inputs, targets mask, and targets for the train, dev, and test
datasets.
* Each record in *_in is a bool 2-D numpy array with one-hot encoded base
pairs with shape [num_input_timesteps, 4]. The base order is ACTG.
* Each record in *_na is a bool 1-D numpy array with shape
[num_output_timesteps].
* Each record in *_out is a float 2-D numpy array with shape
[num_output_timesteps, num_predictions].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import multiprocessing as mp
import os
import h5py
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import dna_encoder
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
# Cap on simultaneously running shard-generation worker processes
# (see GeneExpressionProblem.generate_data).
MAX_CONCURRENT_PROCESSES = 10
class GeneExpressionProblem(problem.Problem):
  """Base Problem for gene expression datasets."""

  @property
  def download_url(self):
    """URL for the source h5 file; subclasses may leave unimplemented."""
    raise NotImplementedError()

  @property
  def h5_file(self):
    """Local filename of the source h5 file."""
    raise NotImplementedError()

  @property
  def num_output_predictions(self):
    """Number of float predictions per timestep."""
    return 10

  @property
  def chunk_size(self):
    """Number of bases grouped into one input symbol by the DNA encoder."""
    return 4

  def feature_encoders(self, data_dir):
    del data_dir
    return {
        "inputs": dna_encoder.DNAEncoder(chunk_size=self.chunk_size),
        # TODO(rsepassi): RealEncoder?
        "targets": text_encoder.TextEncoder()
    }

  @property
  def num_shards(self):
    """Number of output shards for the training split."""
    return 100

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Write train/dev/test TFRecord shards from the h5 source file.

    Shard generation is fanned out to subprocesses, run in batches of at
    most MAX_CONCURRENT_PROCESSES, then the shard files are shuffled.
    """
    try:
      # Download source data if download_url specified
      h5_filepath = generator_utils.maybe_download(tmp_dir, self.h5_file,
                                                   self.download_url)
    except NotImplementedError:
      # Otherwise, look for it locally
      h5_filepath = os.path.join(tmp_dir, self.h5_file)

    # Only the split sizes are read here; workers re-open the file themselves.
    with h5py.File(h5_filepath, "r") as h5_file:
      num_train_examples = h5_file["train_in"].len()
      num_dev_examples = h5_file["valid_in"].len()
      num_test_examples = h5_file["test_in"].len()

    # Collect all_filepaths to later shuffle
    all_filepaths = []
    # Collect created shard processes to start and join
    processes = []
    datasets = [(self.training_filepaths, self.num_shards, "train",
                 num_train_examples), (self.dev_filepaths, 10, "valid",
                                       num_dev_examples),
                (self.test_filepaths, 10, "test", num_test_examples)]
    for fname_fn, nshards, key_prefix, num_examples in datasets:
      outfiles = fname_fn(data_dir, nshards, shuffled=False)
      all_filepaths.extend(outfiles)
      # One worker process per shard, each handling a contiguous index range.
      for start_idx, end_idx, outfile in generate_shard_args(
          outfiles, num_examples):
        p = mp.Process(
            target=generate_dataset,
            args=(h5_filepath, key_prefix, [outfile], self.chunk_size,
                  start_idx, end_idx))
        processes.append(p)

    # 1 per training shard + 10 for dev + 10 for test
    assert len(processes) == self.num_shards + 20

    # Start and wait for processes in batches
    num_batches = int(
        math.ceil(float(len(processes)) / MAX_CONCURRENT_PROCESSES))
    for i in range(num_batches):
      start = i * MAX_CONCURRENT_PROCESSES
      end = start + MAX_CONCURRENT_PROCESSES
      current = processes[start:end]
      for p in current:
        p.start()
      for p in current:
        p.join()

    # Shuffle
    generator_utils.shuffle_dataset(all_filepaths)

  def hparams(self, defaults, unused_model_hparams):
    """Configure modalities: symbol inputs, real-valued Poisson targets."""
    p = defaults
    vocab_size = self._encoders["inputs"].vocab_size
    p.input_modality = {"inputs": (registry.Modalities.SYMBOL, vocab_size)}
    p.target_modality = ("%s:log_poisson_loss" % registry.Modalities.REAL,
                         self.num_output_predictions)
    p.input_space_id = problem.SpaceID.DNA
    p.target_space_id = problem.SpaceID.REAL

  def example_reading_spec(self):
    """Describe the serialized Example features for parsing."""
    data_fields = {
        "inputs": tf.VarLenFeature(tf.int64),
        "targets": tf.VarLenFeature(tf.float32),
    }
    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)

  def preprocess_example(self, example, mode, unused_hparams):
    del mode

    # Reshape targets to contain num_output_predictions per output timestep
    example["targets"] = tf.reshape(example["targets"],
                                    [-1, 1, self.num_output_predictions])
    # Slice off EOS - not needed, and messes up the GeneExpressionConv model
    # which expects the input length to be a multiple of the target length.
    example["inputs"] = example["inputs"][:-1]

    return example

  def eval_metrics(self):
    """Metrics reported during evaluation."""
    return [metrics.Metrics.LOG_POISSON, metrics.Metrics.R2]
@registry.register_problem
class GenomicsExpressionCage10(GeneExpressionProblem):
  """CAGE10 gene expression dataset (binned, 262k-base windows)."""

  @property
  def download_url(self):
    return "https://storage.googleapis.com/262k_binned/cage10_l262k_w128.h5"

  @property
  def h5_file(self):
    return "cage10.h5"
@registry.register_problem
class GenomicsExpressionGm12878(GeneExpressionProblem):
  """GM12878 gene expression dataset (binned, 262k-base windows)."""

  @property
  def download_url(self):
    return "https://storage.googleapis.com/262k_binned/gm12878_l262k_w128.h5"

  @property
  def h5_file(self):
    return "gm12878.h5"
@registry.register_problem
class GenomicsExpressionL262k(GeneExpressionProblem):
  """Gene expression problem backed by a local l262k h5 file (no download)."""

  @property
  def h5_file(self):
    return "l262k_w128.h5"
def generate_shard_args(outfiles, num_examples):
  """Yield one (start_index, end_index, outfile) triple per shard file.

  Examples are split as evenly as possible across the shards; the final
  shard absorbs any remainder so its end index equals num_examples.
  """
  shard_size = num_examples // len(outfiles)
  starts = [shard * shard_size for shard in range(len(outfiles))]
  # Each shard ends where the next one starts; the last ends at num_examples.
  ends = starts[1:] + [num_examples]
  return zip(starts, ends, outfiles)
def generate_dataset(h5_filepath,
                     key_prefix,
                     out_filepaths,
                     chunk_size=1,
                     start_idx=None,
                     end_idx=None):
  """Worker entry point: write examples [start_idx, end_idx) of one split.

  key_prefix selects the split ("train", "valid" or "test"); records are
  written to out_filepaths. Runs in a subprocess, hence the PID logging.
  """
  print("PID: %d, Key: %s, (Start, End): (%s, %s)" % (os.getpid(), key_prefix,
                                                      start_idx, end_idx))
  generator_utils.generate_files(
      dataset_generator(h5_filepath, key_prefix, chunk_size, start_idx,
                        end_idx), out_filepaths)
def dataset_generator(filepath,
                      dataset,
                      chunk_size=1,
                      start_idx=None,
                      end_idx=None):
  """Generate example dicts.

  Reads records [start_idx, end_idx) of the given dataset split ("train",
  "valid" or "test") from the h5 file at filepath and yields example dicts
  built by to_example_dict.
  """
  encoder = dna_encoder.DNAEncoder(chunk_size=chunk_size)
  with h5py.File(filepath, "r") as h5_file:
    # Get input keys from h5_file
    src_keys = [s % dataset for s in ["%s_in", "%s_na", "%s_out"]]
    src_values = [h5_file[k] for k in src_keys]
    inp_data, mask_data, out_data = src_values
    # All three datasets (inputs, mask, outputs) must have the same length.
    assert len(set([v.len() for v in src_values])) == 1

    # Default to the full range when no slice bounds were given.
    if start_idx is None:
      start_idx = 0
    if end_idx is None:
      end_idx = inp_data.len()

    for i in range(start_idx, end_idx):
      if i % 100 == 0:
        print("Generating example %d for %s" % (i, dataset))
      inputs, mask, outputs = inp_data[i], mask_data[i], out_data[i]
      ex_dict = to_example_dict(encoder, inputs, mask, outputs)
      # Original data has one output for every 128 input bases. Ensure that the
      # ratio has been maintained given the chunk size and removing EOS.
      assert (len(ex_dict["inputs"]) - 1) == ((
          128 // chunk_size) * ex_dict["targets_shape"][0])
      yield ex_dict
def to_example_dict(encoder, inputs, mask, outputs):
  """Convert single h5 record to an example dict.

  inputs is a one-hot (num_timesteps, 4) bool array of base pairs; rows
  with no True entry become the encoder's UNK symbol. mask and outputs are
  the per-timestep target mask and float predictions.
  """
  # Inputs
  bases = []
  input_ids = []
  last_idx = -1
  # np.argwhere yields (row, column) pairs for each True entry, in row order.
  for row in np.argwhere(inputs):
    idx, base_id = row
    idx, base_id = int(idx), int(base_id)
    assert idx > last_idx  # if not, means 2 True values in 1 row
    # Some rows are all False. Those rows are mapped to UNK_ID.
    while idx != last_idx + 1:
      bases.append(encoder.UNK)
      last_idx += 1
    bases.append(encoder.BASES[base_id])
    last_idx = idx
  assert len(inputs) == len(bases)

  input_ids = encoder.encode(bases)
  input_ids.append(text_encoder.EOS_ID)

  # Targets: mask and output
  targets_mask = [float(v) for v in mask]
  # The output is (n, m); store targets_shape so that it can be reshaped
  # properly on the other end.
  targets = [float(v) for v in outputs.flatten()]
  targets_shape = [int(dim) for dim in outputs.shape]
  assert mask.shape[0] == outputs.shape[0]

  example_keys = ["inputs", "targets_mask", "targets", "targets_shape"]
  ex_dict = dict(
      zip(example_keys, [input_ids, targets_mask, targets, targets_shape]))
  return ex_dict
| apache-2.0 |
tcmitchell/geni-ch | tools/geni_constants.py | 2 | 2124 | #----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Sets of constants for defining relationships and roles in GENI
# Registry and Authorities

# Context Types: the kind of object a privilege or attribute applies to.
PROJECT_CONTEXT = 1
SLICE_CONTEXT = 2
RESOURCE_CONTEXT = 3
SERVICE_CONTEXT = 4
MEMBER_CONTEXT = 5

# For translating context types to names (if stored that way in database)
context_type_names = {PROJECT_CONTEXT : "PROJECT", SLICE_CONTEXT : "SLICE",
                      RESOURCE_CONTEXT : "RESOURCE", SERVICE_CONTEXT : "SERVICE",
                      MEMBER_CONTEXT : "MEMBER"}

# Attribute (role) Types: a member's role within a given context.
LEAD_ATTRIBUTE = 1
ADMIN_ATTRIBUTE = 2
MEMBER_ATTRIBUTE = 3
AUDITOR_ATTRIBUTE = 4
OPERATOR_ATTRIBUTE = 5

# For translating attribute (role) types to names.
attribute_type_names = { LEAD_ATTRIBUTE : "LEAD", ADMIN_ATTRIBUTE : "ADMIN",
                         MEMBER_ATTRIBUTE : "MEMBER", AUDITOR_ATTRIBUTE : "AUDITOR",
                         OPERATOR_ATTRIBUTE : "OPERATOR"}

# Request status codes from rq_constants.php
PENDING_STATUS = 0
| mit |
redhat-cip/openstack-logcollector | openstack-logcollector/openstack/common/db/sqlalchemy/models.py | 1 | 3841 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from logcollector.openstack.common import timeutils
class ModelBase(object):
    """Base class for SQLAlchemy models.

    Adds dict-like access on top of the mapped columns
    (``obj['key']``, ``obj.get``, ``obj.update``, ``obj.iteritems``,
    iteration over ``(name, value)`` pairs) plus an explicitly
    transactional :meth:`save`.
    """
    __table_initialized__ = False

    def save(self, session):
        """Persist this object using the given SQLAlchemy session."""
        # NOTE(boris-42): This part of code should be look like:
        #                       session.add(self)
        #                       session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create transaction
        #                 explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        """Return attribute ``key``, or ``default`` if it is not set."""
        return getattr(self, key, default)

    @property
    def _extra_keys(self):
        """Specifies custom fields

        Subclasses can override this property to return a list
        of custom fields that should be included in their dict
        representation.

        For reference check tests/db/sqlalchemy/test_models.py
        """
        return []

    def __iter__(self):
        # list() is required for Python 3, where dict.keys() returns a view
        # object that has no extend() method (the previous code broke there).
        columns = list(dict(object_mapper(self).columns).keys())
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns.  An example would be the 'name'
        # property for an Instance.
        columns.extend(self._extra_keys)
        self._i = iter(columns)
        return self

    def next(self):
        """Return the next ``(column_name, value)`` pair."""
        # The built-in next() exists on Python 2.6+ and 3.x, so six's
        # advance_iterator wrapper is unnecessary here.
        n = next(self._i)
        return n, getattr(self, n)

    # Python 3 spells the iterator-protocol method __next__; alias it so the
    # same class iterates correctly on both major versions.
    __next__ = next

    def update(self, values):
        """Make the model object behave like a dict."""
        # dict.items() works on both Python 2 and 3 (the py2 list copy of a
        # small mapping is harmless), so no six.iteritems needed.
        for k, v in values.items():
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        # Instance __dict__ holds joined/loaded attributes; skip SQLAlchemy's
        # private bookkeeping names (leading underscore).
        joined = dict([(k, v) for k, v in self.__dict__.items()
                       if not k[0] == '_'])
        local.update(joined)
        # Return an iterator to match the six.iteritems contract callers saw.
        return iter(local.items())
class TimestampMixin(object):
    # Mixin that stamps rows with creation/last-update times.
    # The lambdas defer evaluation of timeutils.utcnow() to row
    # insert/update time and keep the reference late-bound --
    # presumably so test code can monkeypatch timeutils.utcnow;
    # confirm before simplifying to a bare function reference.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
    # Mixin that flags rows as deleted instead of removing them.
    # When the row was soft-deleted; NULL/None while the row is live.
    deleted_at = Column(DateTime)
    # 0 while the row is live; set to the row's id on soft delete.
    # NOTE(review): assumes the concrete model defines an integer `id`
    # column -- confirm against users of this mixin.
    deleted = Column(Integer, default=0)
    def soft_delete(self, session):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
| apache-2.0 |
rob356/SickRage | lib/github/PullRequestMergeStatus.py | 74 | 3202 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PullRequestMergeStatus(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents the merge status of a pull request. The reference
    can be found here
    http://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
    """

    @property
    def merged(self):
        """
        :type: bool
        """
        return self._merged.value

    @property
    def message(self):
        """
        :type: string
        """
        return self._message.value

    @property
    def sha(self):
        """
        :type: string
        """
        return self._sha.value

    def _initAttributes(self):
        # Every attribute starts out as NotSet; _useAttributes fills in
        # whichever keys the API payload actually contains.
        for slot in ("_merged", "_message", "_sha"):
            setattr(self, slot, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        # Map each raw payload key to its private slot and typed converter,
        # assigning only the keys that are present in the response.
        plan = (
            ("merged", "_merged", self._makeBoolAttribute),
            ("message", "_message", self._makeStringAttribute),
            ("sha", "_sha", self._makeStringAttribute),
        )
        for key, slot, convert in plan:
            if key in attributes:  # pragma no branch
                setattr(self, slot, convert(attributes[key]))
| gpl-3.0 |
madongfly/grpc | src/python/grpcio_test/grpc_test/framework/core/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.