repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
googleapis/python-channel | scripts/readme-gen/readme_gen.py | 122 | 1722 | #!/usr/bin/env python
# Copyright 2016 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates READMEs using configuration defined in yaml."""
import argparse
import io
import os
import subprocess
import jinja2
import yaml
# Jinja environment rooted at the ``templates`` directory next to this script.
# trim_blocks keeps template control tags from emitting stray blank lines.
jinja_env = jinja2.Environment(
    trim_blocks=True,
    loader=jinja2.FileSystemLoader(
        os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))))
# The single top-level template every generated README is rendered from.
README_TMPL = jinja_env.get_template('README.tmpl.rst')
def get_help(file):
    """Return the ``--help`` output of the given Python script as text.

    Runs the script with the interpreter executing this generator
    (``sys.executable``) rather than whatever ``python`` happens to be on
    PATH, which may be absent or a different major version on modern systems.
    """
    import sys
    return subprocess.check_output([sys.executable, file, '--help']).decode()
def main():
    """Render README.tmpl.rst using the YAML config named on the command line.

    The rendered README is written to --destination (default README.rst),
    resolved relative to the directory containing the source YAML file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('--destination', default='README.rst')
    args = parser.parse_args()

    source = os.path.abspath(args.source)
    root = os.path.dirname(source)
    destination = os.path.join(root, args.destination)

    # Expose get_help() to the template so it can embed sample --help output.
    jinja_env.globals['get_help'] = get_help

    with io.open(source, 'r') as f:
        # safe_load: yaml.load() without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the YAML stream.
        config = yaml.safe_load(f)

    # This allows get_help to execute in the right directory.
    os.chdir(root)

    output = README_TMPL.render(config)

    with io.open(destination, 'w') as f:
        f.write(output)
if __name__ == '__main__':  # script entry point when run directly
    main()
| apache-2.0 |
Deepakpatle/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/environment_unittest.py | 124 | 1853 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from .environment import Environment
class EnvironmentTest(unittest.TestCase):
    """Tests for the Environment wrapper's compiler-related helpers."""

    def test_disable_gcc_smartquotes(self):
        # Disabling smart quotes is implemented by forcing the "C" locale,
        # which makes gcc emit plain ASCII quotes in its diagnostics.
        env_under_test = Environment({})
        env_under_test.disable_gcc_smartquotes()
        exported = env_under_test.to_dictionary()
        self.assertEqual(exported['LC_ALL'], 'C')
| bsd-3-clause |
xiangel/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_exceptions/test_error_middleware.py | 47 | 3389 | from paste.fixture import *
from paste.exceptions.errormiddleware import ErrorMiddleware
from paste import lint
from paste.util.quoting import strip_html
#
# For some strange reason, these 4 lines cannot be removed or the regression
# test breaks; is it counting the number of lines in the file somehow?
#
def do_request(app, expect_status=500):  # run app through lint + ErrorMiddleware; return the TestApp response
    app = lint.middleware(app)  # WSGI-conformance checks around the app under test
    app = ErrorMiddleware(app, {}, debug=True)  # debug=True renders the full traceback page
    app = clear_middleware(app)  # re-enables error catching; see clear_middleware below
    testapp = TestApp(app)
    res = testapp.get('', status=expect_status,
                      expect_errors=True)
    return res
def clear_middleware(app):
    """
    The fixture sets paste.throw_errors, which suppresses exactly what
    we want to test in this case. This wrapper also strips exc_info
    on the *first* call to start_response (but not the second, or
    subsequent calls).
    """
    def clear_throw_errors(environ, start_response):
        headers_sent = []  # sentinel list: non-empty once start_response has been called
        def replacement(status, headers, exc_info=None):
            if headers_sent:  # second and later calls keep exc_info intact
                return start_response(status, headers, exc_info)
            headers_sent.append(True)
            return start_response(status, headers)  # first call: exc_info deliberately dropped
        if 'paste.throw_errors' in environ:
            del environ['paste.throw_errors']  # let ErrorMiddleware catch errors itself
        return app(environ, replacement)
    return clear_throw_errors
############################################################
## Applications that raise exceptions
############################################################
def bad_app():  # deliberately wrong signature: WSGI calls it with 2 arguments
    "No argument list!"
    return None
def unicode_bad_app(environ, start_response):  # non-ASCII message exercises unicode-safe rendering
    raise ValueError(u"\u1000")
def start_response_app(environ, start_response):  # NOTE: test_start_res asserts this raise's file line (:52) -- do not move
    "raise error before start_response"
    raise ValueError("hi")
def after_start_response_app(environ, start_response):
    """Application that fails only *after* the response has been started."""
    response_headers = [('Content-type', 'text/plain')]
    start_response("200 OK", response_headers)
    raise ValueError('error2')
def iter_app(environ, start_response):
    """Application whose response iterator blows up partway through."""
    start_response("200 OK", [('Content-type', 'text/plain')])
    chunks = [b'this', b' is ', b' a', None]
    return yielder(chunks)
def yielder(args):
    """Yield each item of ``args``; a ``None`` item triggers a ValueError."""
    for item in args:
        if item is not None:
            yield item
        else:
            raise ValueError("None raises error")
############################################################
## Tests
############################################################
def test_makes_exception():
    """The error page for a broken WSGI callable names the real cause."""
    res = do_request(bad_app)
    assert '<html' in res
    stripped = strip_html(str(res))
    if six.PY3:
        assert 'bad_app() takes 0 positional arguments but 2 were given' in stripped
    else:
        assert 'bad_app() takes no arguments (2 given' in stripped, repr(stripped)
    assert 'iterator = application(environ, start_response_wrapper)' in stripped
    assert 'paste.lint' in stripped
    assert 'paste.exceptions.errormiddleware' in stripped
def test_unicode_exception():
    """Rendering must not choke on a non-ASCII exception message."""
    do_request(unicode_bad_app)
def test_start_res():
    """Errors raised before start_response produce a full traceback page."""
    # NOTE: the ':52' assertion is tied to start_response_app's raise being on
    # file line 52 -- see the "4 lines cannot be removed" comment at the top.
    page = strip_html(str(do_request(start_response_app)))
    assert 'ValueError: hi' in page
    assert 'test_error_middleware' in page
    assert ':52 in start_response_app' in page
def test_after_start():
    """Errors raised after start_response still surface in the body."""
    page = strip_html(str(do_request(after_start_response_app, 200)))
    assert 'ValueError: error2' in page
def test_iter_app():
    """Errors raised mid-iteration are reported with the failing frame."""
    page = do_request(lint.middleware(iter_app), 200)
    assert 'None raises error' in page
    assert 'yielder' in page
| apache-2.0 |
azaghal/ansible | test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py | 47 | 7573 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: vyos_command
author: Nathaniel Case (@Qalthos)
short_description: Run one or more commands on VyOS devices
description:
- The command module allows running one or more commands on remote devices running
VyOS. This module can also be introspected to validate key parameters before returning
successfully. If the conditional statements are not met in the wait period, the
task fails.
- Certain C(show) commands in VyOS produce many lines of output and use a custom pager
that can cause this module to hang. If the value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH)
is not set, the default number of 10000 is used.
extends_documentation_fragment:
- vyos.vyos.vyos
options:
commands:
description:
- The ordered set of commands to execute on the remote device running VyOS. The
output from the command execution is returned to the playbook. If the I(wait_for)
argument is provided, the module is not returned until the condition is satisfied
or the number of retries has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command and what conditionals
to apply. This argument will cause the task to wait for a particular conditional
to be true before moving forward. If the conditional is not true by the configured
I(retries), the task fails. See examples.
aliases:
- waitfor
match:
description:
- The I(match) argument is used in conjunction with the I(wait_for) argument to
specify the match policy. Valid values are C(all) or C(any). If the value is
set to C(all) then all conditionals in the wait_for must be satisfied. If the
value is set to C(any) then only one of the values must be satisfied.
default: all
choices:
- any
- all
retries:
description:
- Specifies the number of retries a command should be tried before it is considered
failed. The command is run on the target device every retry and evaluated against
the I(wait_for) conditionals.
default: 10
interval:
description:
- Configures the interval in seconds to wait between I(retries) of the command.
If the command does not pass the specified conditions, the interval indicates
how long to wait before trying the command again.
default: 1
notes:
- Tested against VyOS 1.1.8 (helium).
- Running C(show system boot-messages all) will cause the module to hang since VyOS
is using a custom pager setting to display the output of that command.
- If a command sent to the device requires answering a prompt, it is possible to pass
a dict containing I(command), I(answer) and I(prompt). See examples.
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
"""
EXAMPLES = """
tasks:
- name: show configuration on ethernet devices eth0 and eth1
vyos_command:
commands:
- show interfaces ethernet {{ item }}
with_items:
- eth0
- eth1
- name: run multiple commands and check if version output contains specific version string
vyos_command:
commands:
- show version
- show hardware cpu
wait_for:
- "result[0] contains 'VyOS 1.1.7'"
- name: run command that requires answering a prompt
vyos_command:
commands:
- command: 'rollback 1'
prompt: 'Proceed with reboot? [confirm][y]'
answer: y
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
Conditional,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
transform_commands,
to_lines,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
run_commands,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
vyos_argument_spec,
)
def parse_commands(module, warnings):
    """Normalize the ``commands`` option; in check mode, drop non-show commands.

    Non-``show`` commands would change device state, so under check mode each
    one is removed from the batch and a warning is recorded instead.
    """
    commands = transform_commands(module)

    if not module.check_mode:
        return commands

    for entry in list(commands):
        if entry["command"].startswith("show"):
            continue
        warnings.append(
            "Only show commands are supported when using check mode, not "
            "executing %s" % entry["command"]
        )
        commands.remove(entry)

    return commands
def main():
    """Module entry point: run commands on a VyOS device, honoring wait_for.

    Re-runs the command batch up to ``retries`` times, ``interval`` seconds
    apart, until the ``wait_for`` conditionals are satisfied (all of them, or
    any one of them when ``match=any``); fails the task otherwise.
    """
    argument_spec = dict(
        commands=dict(type="list", required=True),
        wait_for=dict(type="list", aliases=["waitfor"]),
        match=dict(default="all", choices=["all", "any"]),
        retries=dict(default=10, type="int"),
        interval=dict(default=1, type="int"),
    )
    argument_spec.update(vyos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    warnings = []
    result = {"changed": False, "warnings": warnings}
    commands = parse_commands(module, warnings)

    try:
        conditionals = [
            Conditional(expr) for expr in (module.params["wait_for"] or [])
        ]
    except AttributeError as exc:
        module.fail_json(msg=to_text(exc))

    retries = module.params["retries"]
    interval = module.params["interval"]
    match = module.params["match"]

    for _attempt in range(retries):
        responses = run_commands(module, commands)

        # Iterate over a copy: satisfied conditionals are removed in place.
        for conditional in list(conditionals):
            if conditional(responses):
                if match == "any":
                    conditionals = []
                    break
                conditionals.remove(conditional)

        if not conditionals:
            break

        time.sleep(interval)

    if conditionals:
        module.fail_json(
            msg="One or more conditional statements have not been satisfied",
            failed_conditions=[item.raw for item in conditionals],
        )

    result.update({"stdout": responses, "stdout_lines": list(to_lines(responses))})
    module.exit_json(**result)
if __name__ == "__main__":  # module entry point when executed by Ansible
    main()
| gpl-3.0 |
ModdedPA/android_external_chromium_org | chrome/test/pyautolib/prefs_info.py | 69 | 3629 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python representation for Chromium Preferences.
Obtain one of these from a call to PyUITest::GetPrefsInfo() or
PyUITest::GetLocalStatePrefsInfo().
Example:
class MyTest(pyauto.PyUITest):
def testBasic(self):
info = self.GetPrefsInfo() # fetch prefs snapshot
print info.Prefs() # all prefs
print info.Prefs('session.restore_on_startup') # a single pref
See more tests in chrome/test/functional/prefs.py.
"""
import simplejson as json
from pyauto_errors import JSONInterfaceError
class PrefsInfo(object):
  """Represent info for Chromium preferences.

  The info is represented as a hierarchy of prefs values.
  The values could be plain (integer, bool, float) or complex (like
  dictionary, list).
  """
  def __init__(self, prefs_dict):
    """Initialize a PrefsInfo from a prefs dictionary.

    Args:
      prefs_dict: a dictionary as returned by the IPC command 'GetPrefsInfo'.
                  A typical dict representing a prefs snapshot looks like:
                  { u'prefs':
                    { u'alternate_error_pages': {u'enabled': True},
                      u'autofill': { u'enabled': True, ... },
                      u'bookmark_bar': {u'show_on_all_tabs': False},
                      ...
                    }
                  }

    Raises:
      pyauto_errors.JSONInterfaceError if the automation call returns an error.
    """
    # JSON string prepared in PrefsInfo() in automation_provider.cc
    self.prefsdict = prefs_dict
    # 'in' instead of dict.has_key(): has_key() is deprecated and was removed
    # in Python 3.
    if 'error' in self.prefsdict:
      raise JSONInterfaceError(self.prefsdict['error'])

  def Prefs(self, path=None):
    """Get preferences.

    Args:
      path: If specified, return the preference item for the given path.
            path is a dot-separated string like "session.restore_on_startup".
            One of the equivalent names in chrome/common/pref_names.h could
            also be used.

    Returns:
      preference value. It could be a dictionary, a list or a plain value.
      None, if preference for path not found (if path is given).
    """
    # Local renamed from 'all' to avoid shadowing the builtin all().
    prefs = self.prefsdict.get('prefs', {})
    if not path:  # No path given. Return all prefs.
      return prefs
    for part in path.split('.'):  # Narrow down to the requested prefs path.
      prefs = prefs.get(part)
      if prefs is None:
        return None
    return prefs
| bsd-3-clause |
kelvin13/Knockout | pygments/lexers/dalvik.py | 47 | 4420 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
    """
    For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
    code.
    .. versionadded:: 1.6
    """
    name = 'Smali'
    aliases = ['smali']
    filenames = ['*.smali']
    mimetypes = ['text/smali']
    # State machine consumed by RegexLexer: 'root' dispatches to the named
    # sub-states below via include(); order matters (first match wins).
    tokens = {
        'root': [
            include('comment'),
            include('label'),
            include('field'),
            include('method'),
            include('class'),
            include('directive'),
            include('access-modifier'),
            include('instruction'),
            include('literal'),
            include('punctuation'),
            include('type'),
            include('whitespace')
        ],
        # Assembler directives such as .class / .method / .end method.
        'directive': [
            (r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
             r'enum|method|registers|locals|array-data|packed-switch|'
             r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
             r'epilogue|source)', Keyword),
            (r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
             'packed-switch|sparse-switch|parameter|local)', Keyword),
            (r'^[ \t]*\.restart local', Keyword),
        ],
        'access-modifier': [
            (r'(public|private|protected|static|final|synchronized|bridge|'
             r'varargs|native|abstract|strictfp|synthetic|constructor|'
             r'declared-synchronized|interface|enum|annotation|volatile|'
             r'transient)', Keyword),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
        ],
        'instruction': [
            (r'\b[vp]\d+\b', Name.Builtin),  # registers
            (r'\b[a-z][A-Za-z0-9/-]+\s+', Text),  # instructions
        ],
        'literal': [
            (r'".*"', String),
            (r'0x[0-9A-Fa-f]+t?', Number.Hex),
            (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+L?', Number.Integer),
        ],
        'field': [
            (r'(\$?\b)([\w$]*)(:)',
             bygroups(Punctuation, Name.Variable, Punctuation)),
        ],
        'method': [
            (r'<(?:cl)?init>', Name.Function),  # constructor
            (r'(\$?\b)([\w$]*)(\()',
             bygroups(Punctuation, Name.Function, Punctuation)),
        ],
        'label': [
            (r':\w+', Name.Label),
        ],
        'class': [
            # class names in the form Lcom/namespace/ClassName;
            # I only want to color the ClassName part, so the namespace part is
            # treated as 'Text'
            (r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
             bygroups(Keyword.Type, Text, Name.Class, Text)),
        ],
        'punctuation': [
            (r'->', Punctuation),
            (r'[{},():=.-]', Punctuation),
        ],
        'type': [
            (r'[ZBSCIJFDV\[]+', Keyword.Type),
        ],
        'comment': [
            (r'#.*?\n', Comment),
        ],
    }

    # Pygments convention: analyse_text is called on the class with the raw
    # text and returns a 0..1 confidence score used for lexer guessing; the
    # three checks below accumulate 0.5 + 0.3 + 0.6 (capped by the framework).
    def analyse_text(text):
        score = 0
        if re.search(r'^\s*\.class\s', text, re.MULTILINE):
            score += 0.5
        if re.search(r'\b((check-cast|instance-of|throw-verification-error'
                     r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
                     r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
                     r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
            score += 0.3
        if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
                     r'\b(array-data|class-change-error|declared-synchronized|'
                     r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
                     r'illegal-class-access|illegal-field-access|'
                     r'illegal-method-access|instantiation-error|no-error|'
                     r'no-such-class|no-such-field|no-such-method|'
                     r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
            score += 0.6
        return score
| gpl-3.0 |
starsplatter/Ubiqu-Ity | Ity/Utilities/FilePaths.py | 2 | 10276 | # coding=utf-8
__author__ = 'kohlmannj'
import os
import re
from collections import OrderedDict
def get_file_paths(
    paths=(),
    patterns=None,
    recursion_levels=8,
    valid_paths=None,
    excluded_paths=None,
    debug=False
):
    """
    Given a list of paths, returns two sets of file paths: one set containing
    "valid" file paths, and another set containing "excluded" file paths,
    which have failed the validation criteria or are directories (NOT files).

    "Valid" means that:

    * The path refers to an existing *file* (not a directory).
    * The path matches one of the regular expressions in ``patterns``
      (filtering is disabled entirely when ``patterns`` is None).

    For performance reasons, we do not attempt to open the files for reading,
    so functions that actually perform real work with these paths should
    perform their own checks / error handling for readability and such.

    This method is optionally recursive:

    * ``recursion_levels is True``  -> recurse ad infinitum.
    * positive int                  -> descend at most that many levels.
    * anything else (0, False, None, ...) -> no recursion; paths representing
      folders are simply excluded.

    Keyword arguments:
    paths            -- tuple of file system paths to folders or files.
    patterns         -- tuple of regular expressions to filter file paths by,
                        or None to disable pattern filtering.
    recursion_levels -- int or bool controlling recursion depth. Default 8,
                        which guards against runaway recursion.
    valid_paths      -- set accumulating validated paths across recursive
                        calls. Defaults to a *fresh* set per top-level call.
    excluded_paths   -- set accumulating excluded paths across recursive
                        calls; also defaults to a fresh set.
    debug            -- if True, print debug output while running.
    """
    # Fresh accumulator sets per top-level call. (The previous ``set()``
    # default arguments were created once at definition time, so results
    # leaked between unrelated calls.)
    if valid_paths is None:
        valid_paths = set()
    if excluded_paths is None:
        excluded_paths = set()
    # Some setup for recursion_levels. Prevent runaways by default.
    next_recursion_levels = False
    if recursion_levels is True:
        next_recursion_levels = True
    elif type(recursion_levels) is int:
        if recursion_levels > 0:
            # Recurse for this level; the next level gets one fewer.
            next_recursion_levels = recursion_levels - 1
            if next_recursion_levels <= 0:
                next_recursion_levels = False
            recursion_levels = True
        else:
            # Non-positive int: no recursion at all.
            next_recursion_levels = False
    else:
        # Garbage input: disable recursion.
        recursion_levels = False
        next_recursion_levels = False
    for path in paths:
        # Get the most absolute formulation of the path we can muster.
        path = os.path.abspath(os.path.expanduser(path))
        # Skip paths we have already classified.
        if path in valid_paths or path in excluded_paths:
            if debug:
                print("Skipping duplicate path '%s'" % path)
            continue
        if os.path.isfile(path):
            if patterns is None:
                # Not filtering by regular expression: accept unconditionally.
                valid_paths.add(path)
            else:
                # for/else: the else clause runs only when NO pattern matched.
                # (Previously the exclusion ran unconditionally after the
                # loop, so a matching path ended up in BOTH sets.)
                for pattern in patterns:
                    if re.search(pattern, path) is not None:
                        valid_paths.add(path)
                        break
                else:
                    excluded_paths.add(path)
        elif os.path.isdir(path) and recursion_levels is True:
            if debug:
                print("Recursively validating paths inside '%s'..." % path)
            # Validate this directory's contents with the next recursion level.
            child_paths = tuple(
                os.path.join(path, child) for child in os.listdir(path)
            )
            get_file_paths(
                child_paths,
                patterns=patterns,
                recursion_levels=next_recursion_levels,
                valid_paths=valid_paths,
                excluded_paths=excluded_paths,
                debug=debug
            )
        else:
            # Neither a matching file nor a directory we may recurse into.
            excluded_paths.add(path)
    # All done, so return both sets of valid and excluded file paths.
    return valid_paths, excluded_paths
def valid_paths(
    paths=(),
    patterns=('\.txt$',),
    recursion_levels=8,
    debug=False
):
    """
    Returns a tuple of strings that are "valid" file paths. Also corrects
    input in the case when ``paths`` (or ``patterns``) is a single str
    instead of a tuple of strs.

    Please refer to the docstring for get_file_paths for more information
    about what we consider a "valid" file path. By default, only file paths
    ending in ".txt" are considered valid.

    Keyword arguments:
    paths            -- tuple of file paths to validate (a bare str is
                        wrapped into a 1-tuple).
    patterns         -- tuple of regular expression patterns by which to
                        filter file paths (a bare str is likewise wrapped).
    recursion_levels -- int or bool recursion depth passed through to
                        get_file_paths (default 8).
    debug            -- if True, print debug output while running.
    """
    # Correct the input if paths is a bare str.
    if type(paths) is str:
        paths = (paths,)
    # Same correction for the patterns value.
    if type(patterns) is str:
        patterns = (patterns,)
    # Pass fresh accumulator sets explicitly so repeated calls can never
    # share (and therefore accumulate) results through get_file_paths()'s
    # default arguments.
    return tuple(get_file_paths(
        paths,
        patterns=patterns,
        recursion_levels=recursion_levels,
        valid_paths=set(),
        excluded_paths=set(),
        debug=debug
    )[0])
def get_files_in_path(path, extensions=(".txt",)):
    """Walk ``path`` and map each file's base name to its full path.

    Keyword arguments:
    path       -- root directory to walk recursively.
    extensions -- tuple of file extensions to accept, or None to accept
                  every file. (An empty tuple keeps the historical behavior
                  of matching nothing.)

    Returns an OrderedDict mapping base name (extension stripped) -> full
    path, in os.walk() order.

    Raises IOError if two files share the same base name.
    """
    files = OrderedDict()
    for root, dirnames, filenames in os.walk(path):
        for filename in filenames:
            file_path = os.path.join(root, filename)
            name, ext = os.path.splitext(filename)
            # Skip files whose extension we are not looking for. The previous
            # check used ``or`` between its two clauses, so extensions=None
            # evaluated len(None) and raised TypeError; a single
            # ``is not None`` test both fixes that and preserves the
            # empty-tuple behavior.
            if extensions is not None and ext not in extensions:
                continue
            if name in files:
                raise IOError("Attempting to add the same file to the data list a second time!")
            files[name] = file_path
    return files
def get_valid_path(path, relative_path_base=None, fallback_path=None):
    """Resolve ``path`` to an absolute path, or fall back.

    A relative str ``path`` is joined onto ``relative_path_base`` (when that
    base is itself an absolute str); a non-str ``path`` is replaced by
    ``fallback_path``. Whatever str results is run through os.path.abspath(),
    which, among other things, strips trailing slashes. Returns None when no
    str path could be produced.
    """
    if type(path) is not str:
        # Not a str (None or anything else): fall back.
        path = fallback_path
    elif (
        not os.path.isabs(path) and
        type(relative_path_base) is str and
        os.path.isabs(relative_path_base)
    ):
        # Relative path: assume it lives under the base directory.
        path = os.path.join(relative_path_base, path)
    if type(path) is str:
        path = os.path.abspath(path)
    return path
| bsd-2-clause |
klintwood/python_koans | python2/koans/about_class_attributes.py | 74 | 4889 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
class AboutClassAttributes(Koan):
class Dog(object):
pass
def test_new_style_class_objects_are_objects(self):
# Note: Old style class instances are not objects but they are being
# phased out in Python 3.
fido = self.Dog()
self.assertEqual(__, isinstance(fido, object))
def test_classes_are_types(self):
self.assertEqual(__, self.Dog.__class__ == type)
def test_classes_are_objects_too(self):
self.assertEqual(__, issubclass(self.Dog, object))
def test_objects_have_methods(self):
fido = self.Dog()
self.assertEqual(__, len(dir(fido)))
def test_classes_have_methods(self):
self.assertEqual(__, len(dir(self.Dog)))
def test_creating_objects_without_defining_a_class(self):
singularity = object()
self.assertEqual(__, len(dir(singularity)))
def test_defining_attributes_on_individual_objects(self):
fido = self.Dog()
fido.legs = 4
self.assertEqual(__, fido.legs)
def test_defining_functions_on_individual_objects(self):
fido = self.Dog()
fido.wag = lambda: 'fidos wag'
self.assertEqual(__, fido.wag())
def test_other_objects_are_not_affected_by_these_singleton_functions(self):
fido = self.Dog()
rover = self.Dog()
def wag():
return 'fidos wag'
fido.wag = wag
try:
rover.wag()
except Exception as ex:
self.assertMatch(__, ex[0])
# ------------------------------------------------------------------
class Dog2(object):
def wag(self):
return 'instance wag'
def bark(self):
return "instance bark"
def growl(self):
return "instance growl"
@staticmethod
def bark():
return "staticmethod bark, arg: None"
@classmethod
def growl(cls):
return "classmethod growl, arg: cls=" + cls.__name__
def test_like_all_objects_classes_can_have_singleton_methods(self):
self.assertMatch(__, self.Dog2.growl())
def test_classmethods_are_not_independent_of_instance_methods(self):
fido = self.Dog2()
self.assertMatch(__, fido.growl())
self.assertMatch(__, self.Dog2.growl())
def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
self.assertMatch(__, self.Dog2.bark())
def test_staticmethods_also_overshadow_instance_methods(self):
fido = self.Dog2()
self.assertMatch(__, fido.bark())
# ------------------------------------------------------------------
class Dog3(object):
def __init__(self):
self._name = None
def get_name_from_instance(self):
return self._name
def set_name_from_instance(self, name):
self._name = name
@classmethod
def get_name(cls):
return cls._name
@classmethod
def set_name(cls, name):
cls._name = name
name = property(get_name, set_name)
name_from_instance = property(
get_name_from_instance, set_name_from_instance)
def test_classmethods_can_not_be_used_as_properties(self):
fido = self.Dog3()
try:
fido.name = "Fido"
except Exception as ex:
self.assertMatch(__, ex[0])
def test_classes_and_instances_do_not_share_instance_attributes(self):
fido = self.Dog3()
fido.set_name_from_instance("Fido")
fido.set_name("Rover")
self.assertEqual(__, fido.get_name_from_instance())
self.assertEqual(__, self.Dog3.get_name())
def test_classes_and_instances_do_share_class_attributes(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual(__, fido.get_name())
self.assertEqual(__, self.Dog3.get_name())
# ------------------------------------------------------------------
class Dog4(object):
def a_class_method(cls):
return 'dogs class method'
def a_static_method():
return 'dogs static method'
a_class_method = classmethod(a_class_method)
a_static_method = staticmethod(a_static_method)
def test_you_can_define_class_methods_without_using_a_decorator(self):
self.assertEqual(__, self.Dog4.a_class_method())
def test_you_can_define_static_methods_without_using_a_decorator(self):
self.assertEqual(__, self.Dog4.a_static_method())
# ------------------------------------------------------------------
def test_you_can_explicitly_call_class_methods_from_instance_methods(self):
    # __class__ yields the instance's class, through which the classmethod
    # is reachable.
    fido = self.Dog4()
    self.assertEqual(__, fido.__class__.a_class_method())
| mit |
akashsinghal/Speech-Memorization-App | Python_Backend/lib/requests/help.py | 70 | 3616 | """Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
import idna
import urllib3
import chardet
from . import __version__ as requests_version
try:
from .packages.urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
    """Return a dict with the Python implementation and version.

    Provide both the name and the version of the Python implementation
    currently running. For example, on CPython 2.7.5 it will return
    {'name': 'CPython', 'version': '2.7.5'}.

    This function works best on CPython and PyPy: in particular, it probably
    doesn't work for Jython or IronPython. Future investigation should be done
    to work out the correct shape of the code for those platforms.
    """
    name = platform.python_implementation()

    if name == 'CPython':
        version = platform.python_version()
    elif name == 'PyPy':
        # PyPy reports its own version separately from the CPython level
        # it emulates; reconstruct a dotted string from the named fields.
        pypy = sys.pypy_version_info
        version = '%s.%s.%s' % (pypy.major, pypy.minor, pypy.micro)
        if pypy.releaselevel != 'final':
            version += pypy.releaselevel
    elif name in ('Jython', 'IronPython'):
        version = platform.python_version()  # Complete Guess
    else:
        version = 'Unknown'

    return {'name': name, 'version': version}
def info():
    """Generate information for a bug report.

    Collects platform, interpreter, and crypto/networking dependency
    versions into a single nested dict suitable for JSON serialization.
    """
    # platform calls can fail on restricted systems; degrade gracefully.
    try:
        platform_details = {
            'system': platform.system(),
            'release': platform.release(),
        }
    except IOError:
        platform_details = {
            'system': 'Unknown',
            'release': 'Unknown',
        }

    # pyOpenSSL details are only available when the optional import at
    # module level succeeded.
    pyopenssl_details = {
        'version': None,
        'openssl_version': '',
    }
    if OpenSSL:
        pyopenssl_details = {
            'version': OpenSSL.__version__,
            'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
        }

    # OPENSSL_VERSION_NUMBER doesn't exist in the Python 2.6 ssl module.
    ssl_version_number = getattr(ssl, 'OPENSSL_VERSION_NUMBER', None)

    return {
        'platform': platform_details,
        'implementation': _implementation(),
        'system_ssl': {
            'version': '%x' % ssl_version_number
                       if ssl_version_number is not None else ''
        },
        'using_pyopenssl': pyopenssl is not None,
        'pyOpenSSL': pyopenssl_details,
        'urllib3': {'version': urllib3.__version__},
        'chardet': {'version': chardet.__version__},
        'cryptography': {'version': getattr(cryptography, '__version__', '')},
        'idna': {'version': getattr(idna, '__version__', '')},
        'requests': {
            'version': requests_version,
        },
    }
def main():
    """Pretty-print the bug-report information as indented JSON on stdout."""
    report = info()
    print(json.dumps(report, sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| apache-2.0 |
mwasilew/testmanager | testmanager/testplanner/admin.py | 1 | 1256 | # Copyright (C) 2014 Linaro Limited
#
# Author: Milosz Wasilewski <milosz.wasilewski@linaro.org>
#
# This file is part of Testmanager.
#
# Testmanager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Testmanager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Testmanager. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from testmanager.testplanner.models import (
TestRepository,
Maintainer,
OS,
Scope,
Device,
TestPlan,
TestDefinition,
TestDefinitionRevision,
TestPlanTestDefinition
)
# Expose every test-planner model through the default Django admin site.
# Registration order matches the original one-call-per-model listing.
for _model in (
    TestRepository,
    Maintainer,
    OS,
    Scope,
    Device,
    TestPlan,
    TestDefinition,
    TestDefinitionRevision,
    TestPlanTestDefinition,
):
    admin.site.register(_model)
| agpl-3.0 |
kikocorreoso/brython | www/src/Lib/test/test_iter.py | 9 | 32264 | # Test iterators.
import sys
import unittest
from test.support import run_unittest, TESTFN, unlink, cpython_only
from test.support import check_free_after_iterating
import pickle
import collections.abc
# Test result of triple loop (too big to inline)
TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2),
(0, 1, 0), (0, 1, 1), (0, 1, 2),
(0, 2, 0), (0, 2, 1), (0, 2, 2),
(1, 0, 0), (1, 0, 1), (1, 0, 2),
(1, 1, 0), (1, 1, 1), (1, 1, 2),
(1, 2, 0), (1, 2, 1), (1, 2, 2),
(2, 0, 0), (2, 0, 1), (2, 0, 2),
(2, 1, 0), (2, 1, 1), (2, 1, 2),
(2, 2, 0), (2, 2, 1), (2, 2, 2)]
# Helper classes
class BasicIterClass:
    """Minimal hand-rolled iterator yielding 0, 1, ..., n-1.

    Implements the full iterator protocol (__iter__ returns self,
    __next__ raises StopIteration when exhausted).
    """

    def __init__(self, n):
        self.n = n
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        current = self.i
        if current >= self.n:
            raise StopIteration
        self.i = current + 1
        return current
class IteratingSequenceClass:
    """Iterable (not an iterator): every __iter__ call hands back a fresh
    BasicIterClass, so the object can be iterated repeatedly."""

    def __init__(self, n):
        self.n = n

    def __iter__(self):
        return BasicIterClass(self.n)
class SequenceClass:
    """Old-protocol sequence: iterable purely through __getitem__.

    Indexing with 0 <= i < n echoes the index back; anything else raises
    IndexError, which is what terminates the legacy iteration protocol.
    """

    def __init__(self, n):
        self.n = n

    def __getitem__(self, i):
        if not 0 <= i < self.n:
            raise IndexError
        return i
class UnlimitedSequenceClass:
    """Unbounded __getitem__-style sequence: never raises IndexError, so
    iteration over it is endless; every index is returned unchanged."""

    def __getitem__(self, index):
        return index
class DefaultIterClass:
    """Class with no iteration support at all; iter() on it must raise
    TypeError (exercised by test_error_iter)."""
    pass
class NoIterClass:
    """Defines __getitem__ but explicitly opts out of iteration by setting
    __iter__ = None, so iter() raises TypeError despite the legacy
    sequence protocol being available (exercised by test_error_iter)."""

    def __getitem__(self, i):
        return i

    __iter__ = None
# Main test suite
class TestCase(unittest.TestCase):
# Helper to check that an iterator returns a given sequence
def check_iterator(self, it, seq, pickle=True):
    """Drain *it* with next() and assert it yields exactly *seq*.

    When *pickle* is true, additionally verify the iterator round-trips
    through pickle at every protocol (see check_pickle).
    """
    if pickle:
        self.check_pickle(it, seq)
    res = []
    while 1:
        try:
            val = next(it)
        except StopIteration:
            break
        res.append(val)
    self.assertEqual(res, seq)
# Helper to check that a for loop generates a given sequence
def check_for_loop(self, expr, seq, pickle=True):
    """Iterate *expr* with a for loop and assert it yields exactly *seq*.

    When *pickle* is true, additionally verify that iter(expr) survives
    pickling (see check_pickle).
    """
    if pickle:
        self.check_pickle(iter(expr), seq)
    res = []
    for val in expr:
        res.append(val)
    self.assertEqual(res, seq)
# Helper to check picklability
def check_pickle(self, itorg, seq):
    """Assert that iterator *itorg* pickles correctly at every protocol.

    Two round-trips are checked: a fresh copy must yield all of *seq*,
    and a copy pickled after one next() call must yield seq[1:].
    """
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        d = pickle.dumps(itorg, proto)
        it = pickle.loads(d)
        # Cannot assert type equality because dict iterators unpickle as list
        # iterators.
        # self.assertEqual(type(itorg), type(it))
        self.assertTrue(isinstance(it, collections.abc.Iterator))
        self.assertEqual(list(it), seq)
        it = pickle.loads(d)
        try:
            next(it)
        except StopIteration:
            # Empty sequence: nothing left to advance, try next protocol.
            continue
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        self.assertEqual(list(it), seq[1:])
# Test basic use of iter() function
def test_iter_basic(self):
self.check_iterator(iter(range(10)), list(range(10)))
# Test that iter(iter(x)) is the same as iter(x)
def test_iter_idempotency(self):
seq = list(range(10))
it = iter(seq)
it2 = iter(it)
self.assertTrue(it is it2)
# Test that for loops over iterators work
def test_iter_for_loop(self):
self.check_for_loop(iter(range(10)), list(range(10)))
# Test several independent iterators over the same list
def test_iter_independence(self):
seq = range(3)
res = []
for i in iter(seq):
for j in iter(seq):
for k in iter(seq):
res.append((i, j, k))
self.assertEqual(res, TRIPLETS)
# Test triple list comprehension using iterators
def test_nested_comprehensions_iter(self):
seq = range(3)
res = [(i, j, k)
for i in iter(seq) for j in iter(seq) for k in iter(seq)]
self.assertEqual(res, TRIPLETS)
# Test triple list comprehension without iterators
def test_nested_comprehensions_for(self):
seq = range(3)
res = [(i, j, k) for i in seq for j in seq for k in seq]
self.assertEqual(res, TRIPLETS)
# Test a class with __iter__ in a for loop
def test_iter_class_for(self):
self.check_for_loop(IteratingSequenceClass(10), list(range(10)))
# Test a class with __iter__ with explicit iter()
def test_iter_class_iter(self):
self.check_iterator(iter(IteratingSequenceClass(10)), list(range(10)))
# Test for loop on a sequence class without __iter__
def test_seq_class_for(self):
self.check_for_loop(SequenceClass(10), list(range(10)))
# Test iter() on a sequence class without __iter__
def test_seq_class_iter(self):
self.check_iterator(iter(SequenceClass(10)), list(range(10)))
def test_mutating_seq_class_iter_pickle(self):
orig = SequenceClass(5)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = iter(orig)
d = pickle.dumps((itorig, orig), proto)
it, seq = pickle.loads(d)
seq.n = 7
self.assertIs(type(it), type(itorig))
self.assertEqual(list(it), list(range(7)))
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, seq = pickle.loads(d)
seq.n = 7
self.assertIs(type(it), type(itorig))
self.assertEqual(list(it), list(range(1, 7)))
# empty iterator
for i in range(1, 5):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, seq = pickle.loads(d)
seq.n = 7
self.assertIs(type(it), type(itorig))
self.assertEqual(list(it), list(range(5, 7)))
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, seq = pickle.loads(d)
seq.n = 7
self.assertTrue(isinstance(it, collections.abc.Iterator))
self.assertEqual(list(it), [])
def test_mutating_seq_class_exhausted_iter(self):
    """An exhausted iterator must stay exhausted (its "sink state") even
    if the underlying sequence grows; a non-exhausted iterator over the
    same sequence sees the newly added items."""
    a = SequenceClass(5)
    exhit = iter(a)
    empit = iter(a)
    for x in exhit:  # exhaust the iterator
        next(empit)  # not exhausted
    a.n = 7
    self.assertEqual(list(exhit), [])
    self.assertEqual(list(empit), [5, 6])
    self.assertEqual(list(a), [0, 1, 2, 3, 4, 5, 6])
# Test a new_style class with __iter__ but no next() method
def test_new_style_iter_class(self):
class IterClass(object):
def __iter__(self):
return self
self.assertRaises(TypeError, iter, IterClass())
# Test two-argument iter() with callable instance
def test_iter_callable(self):
class C:
def __init__(self):
self.i = 0
def __call__(self):
i = self.i
self.i = i + 1
if i > 100:
raise IndexError # Emergency stop
return i
self.check_iterator(iter(C(), 10), list(range(10)), pickle=False)
# Test two-argument iter() with function
def test_iter_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
return i
self.check_iterator(iter(spam, 10), list(range(10)), pickle=False)
# Test two-argument iter() with function that raises StopIteration
def test_iter_function_stop(self):
def spam(state=[0]):
i = state[0]
if i == 10:
raise StopIteration
state[0] = i+1
return i
self.check_iterator(iter(spam, 20), list(range(10)), pickle=False)
# Test exception propagation through function iterator
def test_exception_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
if i == 10:
raise RuntimeError
return i
res = []
try:
for x in iter(spam, 20):
res.append(x)
except RuntimeError:
self.assertEqual(res, list(range(10)))
else:
self.fail("should have raised RuntimeError")
# Test exception propagation through sequence iterator
def test_exception_sequence(self):
class MySequenceClass(SequenceClass):
def __getitem__(self, i):
if i == 10:
raise RuntimeError
return SequenceClass.__getitem__(self, i)
res = []
try:
for x in MySequenceClass(20):
res.append(x)
except RuntimeError:
self.assertEqual(res, list(range(10)))
else:
self.fail("should have raised RuntimeError")
# Test for StopIteration from __getitem__
def test_stop_sequence(self):
class MySequenceClass(SequenceClass):
def __getitem__(self, i):
if i == 10:
raise StopIteration
return SequenceClass.__getitem__(self, i)
self.check_for_loop(MySequenceClass(20), list(range(10)), pickle=False)
# Test a big range
def test_iter_big_range(self):
self.check_for_loop(iter(range(10000)), list(range(10000)))
# Test an empty list
def test_iter_empty(self):
self.check_for_loop(iter([]), [])
# Test a tuple
def test_iter_tuple(self):
self.check_for_loop(iter((0,1,2,3,4,5,6,7,8,9)), list(range(10)))
# Test a range
def test_iter_range(self):
self.check_for_loop(iter(range(10)), list(range(10)))
# Test a string
def test_iter_string(self):
self.check_for_loop(iter("abcde"), ["a", "b", "c", "d", "e"])
# Test a directory
def test_iter_dict(self):
dict = {}
for i in range(10):
dict[i] = None
self.check_for_loop(dict, list(dict.keys()))
# Test a file
def test_iter_file(self):
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"], pickle=False)
self.check_for_loop(f, [], pickle=False)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test list()'s use of iterators.
def test_builtin_list(self):
self.assertEqual(list(SequenceClass(5)), list(range(5)))
self.assertEqual(list(SequenceClass(0)), [])
self.assertEqual(list(()), [])
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(d), list(d.keys()))
self.assertRaises(TypeError, list, list)
self.assertRaises(TypeError, list, 42)
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"])
f.seek(0, 0)
self.assertEqual(list(f),
["0\n", "1\n", "2\n", "3\n", "4\n"])
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test tuples()'s use of iterators.
def test_builtin_tuple(self):
self.assertEqual(tuple(SequenceClass(5)), (0, 1, 2, 3, 4))
self.assertEqual(tuple(SequenceClass(0)), ())
self.assertEqual(tuple([]), ())
self.assertEqual(tuple(()), ())
self.assertEqual(tuple("abc"), ("a", "b", "c"))
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(tuple(d), tuple(d.keys()))
self.assertRaises(TypeError, tuple, list)
self.assertRaises(TypeError, tuple, 42)
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n"))
f.seek(0, 0)
self.assertEqual(tuple(f),
("0\n", "1\n", "2\n", "3\n", "4\n"))
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test filter()'s use of iterators.
def test_builtin_filter(self):
self.assertEqual(list(filter(None, SequenceClass(5))),
list(range(1, 5)))
self.assertEqual(list(filter(None, SequenceClass(0))), [])
self.assertEqual(list(filter(None, ())), [])
self.assertEqual(list(filter(None, "abc")), ["a", "b", "c"])
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(filter(None, d)), list(d.keys()))
self.assertRaises(TypeError, filter, None, list)
self.assertRaises(TypeError, filter, None, 42)
class Boolean:
def __init__(self, truth):
self.truth = truth
def __bool__(self):
return self.truth
bTrue = Boolean(True)
bFalse = Boolean(False)
class Seq:
def __init__(self, *args):
self.vals = args
def __iter__(self):
class SeqIter:
def __init__(self, vals):
self.vals = vals
self.i = 0
def __iter__(self):
return self
def __next__(self):
i = self.i
self.i = i + 1
if i < len(self.vals):
return self.vals[i]
else:
raise StopIteration
return SeqIter(self.vals)
seq = Seq(*([bTrue, bFalse] * 25))
self.assertEqual(list(filter(lambda x: not x, seq)), [bFalse]*25)
self.assertEqual(list(filter(lambda x: not x, iter(seq))), [bFalse]*25)
# Test max() and min()'s use of iterators.
def test_builtin_max_min(self):
self.assertEqual(max(SequenceClass(5)), 4)
self.assertEqual(min(SequenceClass(5)), 0)
self.assertEqual(max(8, -1), 8)
self.assertEqual(min(8, -1), -1)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(max(d), "two")
self.assertEqual(min(d), "one")
self.assertEqual(max(d.values()), 3)
self.assertEqual(min(iter(d.values())), 1)
f = open(TESTFN, "w")
try:
f.write("medium line\n")
f.write("xtra large line\n")
f.write("itty-bitty line\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(min(f), "itty-bitty line\n")
f.seek(0, 0)
self.assertEqual(max(f), "xtra large line\n")
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test map()'s use of iterators.
def test_builtin_map(self):
self.assertEqual(list(map(lambda x: x+1, SequenceClass(5))),
list(range(1, 6)))
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(map(lambda k, d=d: (k, d[k]), d)),
list(d.items()))
dkeys = list(d.keys())
expected = [(i < len(d) and dkeys[i] or None,
i,
i < len(d) and dkeys[i] or None)
for i in range(3)]
f = open(TESTFN, "w")
try:
for i in range(10):
f.write("xy" * i + "\n") # line i has len 2*i+1
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(list(map(len, f)), list(range(1, 21, 2)))
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test zip()'s use of iterators.
def test_builtin_zip(self):
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertEqual(list(zip(*[(1, 2), 'ab'])), [(1, 'a'), (2, 'b')])
self.assertRaises(TypeError, zip, None)
self.assertRaises(TypeError, zip, range(10), 42)
self.assertRaises(TypeError, zip, range(10), zip)
self.assertEqual(list(zip(IteratingSequenceClass(3))),
[(0,), (1,), (2,)])
self.assertEqual(list(zip(SequenceClass(3))),
[(0,), (1,), (2,)])
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(d.items()), list(zip(d, d.values())))
# Generate all ints starting at constructor arg.
class IntsFrom:
def __init__(self, start):
self.i = start
def __iter__(self):
return self
def __next__(self):
i = self.i
self.i = i+1
return i
f = open(TESTFN, "w")
try:
f.write("a\n" "bbb\n" "cc\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(list(zip(IntsFrom(0), f, IntsFrom(-100))),
[(0, "a\n", -100),
(1, "bbb\n", -99),
(2, "cc\n", -98)])
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
self.assertEqual(list(zip(range(5))), [(i,) for i in range(5)])
# Classes that lie about their lengths.
class NoGuessLen5:
def __getitem__(self, i):
if i >= 5:
raise IndexError
return i
class Guess3Len5(NoGuessLen5):
def __len__(self):
return 3
class Guess30Len5(NoGuessLen5):
def __len__(self):
return 30
def lzip(*args):
return list(zip(*args))
self.assertEqual(len(Guess3Len5()), 3)
self.assertEqual(len(Guess30Len5()), 30)
self.assertEqual(lzip(NoGuessLen5()), lzip(range(5)))
self.assertEqual(lzip(Guess3Len5()), lzip(range(5)))
self.assertEqual(lzip(Guess30Len5()), lzip(range(5)))
expected = [(i, i) for i in range(5)]
for x in NoGuessLen5(), Guess3Len5(), Guess30Len5():
for y in NoGuessLen5(), Guess3Len5(), Guess30Len5():
self.assertEqual(lzip(x, y), expected)
def test_unicode_join_endcase(self):
# This class inserts a Unicode object into its argument's natural
# iteration, in the 3rd position.
class OhPhooey:
def __init__(self, seq):
self.it = iter(seq)
self.i = 0
def __iter__(self):
return self
def __next__(self):
i = self.i
self.i = i+1
if i == 2:
return "fooled you!"
return next(self.it)
f = open(TESTFN, "w")
try:
f.write("a\n" + "b\n" + "c\n")
finally:
f.close()
f = open(TESTFN, "r")
# Nasty: string.join(s) can't know whether unicode.join() is needed
# until it's seen all of s's elements. But in this case, f's
# iterator cannot be restarted. So what we're testing here is
# whether string.join() can manage to remember everything it's seen
# and pass that on to unicode.join().
try:
got = " - ".join(OhPhooey(f))
self.assertEqual(got, "a\n - b\n - fooled you! - c\n")
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test iterators with 'x in y' and 'x not in y'.
def test_in_and_not_in(self):
for sc5 in IteratingSequenceClass(5), SequenceClass(5):
for i in range(5):
self.assertIn(i, sc5)
for i in "abc", -1, 5, 42.42, (3, 4), [], {1: 1}, 3-12j, sc5:
self.assertNotIn(i, sc5)
self.assertRaises(TypeError, lambda: 3 in 12)
self.assertRaises(TypeError, lambda: 3 not in map)
d = {"one": 1, "two": 2, "three": 3, 1j: 2j}
for k in d:
self.assertIn(k, d)
self.assertNotIn(k, d.values())
for v in d.values():
self.assertIn(v, d.values())
self.assertNotIn(v, d)
for k, v in d.items():
self.assertIn((k, v), d.items())
self.assertNotIn((v, k), d.items())
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
for chunk in "abc":
f.seek(0, 0)
self.assertNotIn(chunk, f)
f.seek(0, 0)
self.assertIn((chunk + "\n"), f)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test iterators with operator.countOf (PySequence_Count).
def test_countOf(self):
from operator import countOf
self.assertEqual(countOf([1,2,2,3,2,5], 2), 3)
self.assertEqual(countOf((1,2,2,3,2,5), 2), 3)
self.assertEqual(countOf("122325", "2"), 3)
self.assertEqual(countOf("122325", "6"), 0)
self.assertRaises(TypeError, countOf, 42, 1)
self.assertRaises(TypeError, countOf, countOf, countOf)
d = {"one": 3, "two": 3, "three": 3, 1j: 2j}
for k in d:
self.assertEqual(countOf(d, k), 1)
self.assertEqual(countOf(d.values(), 3), 3)
self.assertEqual(countOf(d.values(), 2j), 1)
self.assertEqual(countOf(d.values(), 1j), 0)
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n" "b\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
for letter, count in ("a", 1), ("b", 2), ("c", 1), ("d", 0):
f.seek(0, 0)
self.assertEqual(countOf(f, letter + "\n"), count)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test iterators with operator.indexOf (PySequence_Index).
def test_indexOf(self):
from operator import indexOf
self.assertEqual(indexOf([1,2,2,3,2,5], 1), 0)
self.assertEqual(indexOf((1,2,2,3,2,5), 2), 1)
self.assertEqual(indexOf((1,2,2,3,2,5), 3), 3)
self.assertEqual(indexOf((1,2,2,3,2,5), 5), 5)
self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 0)
self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 6)
self.assertEqual(indexOf("122325", "2"), 1)
self.assertEqual(indexOf("122325", "5"), 5)
self.assertRaises(ValueError, indexOf, "122325", "6")
self.assertRaises(TypeError, indexOf, 42, 1)
self.assertRaises(TypeError, indexOf, indexOf, indexOf)
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n" "d\n" "e\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
fiter = iter(f)
self.assertEqual(indexOf(fiter, "b\n"), 1)
self.assertEqual(indexOf(fiter, "d\n"), 1)
self.assertEqual(indexOf(fiter, "e\n"), 0)
self.assertRaises(ValueError, indexOf, fiter, "a\n")
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
iclass = IteratingSequenceClass(3)
for i in range(3):
self.assertEqual(indexOf(iclass, i), i)
self.assertRaises(ValueError, indexOf, iclass, -1)
# Test iterators with file.writelines().
def test_writelines(self):
f = open(TESTFN, "w")
try:
self.assertRaises(TypeError, f.writelines, None)
self.assertRaises(TypeError, f.writelines, 42)
f.writelines(["1\n", "2\n"])
f.writelines(("3\n", "4\n"))
f.writelines({'5\n': None})
f.writelines({})
# Try a big chunk too.
class Iterator:
def __init__(self, start, finish):
self.start = start
self.finish = finish
self.i = self.start
def __next__(self):
if self.i >= self.finish:
raise StopIteration
result = str(self.i) + '\n'
self.i += 1
return result
def __iter__(self):
return self
class Whatever:
def __init__(self, start, finish):
self.start = start
self.finish = finish
def __iter__(self):
return Iterator(self.start, self.finish)
f.writelines(Whatever(6, 6+2000))
f.close()
f = open(TESTFN)
expected = [str(i) + "\n" for i in range(1, 2006)]
self.assertEqual(list(f), expected)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test iterators on RHS of unpacking assignments.
def test_unpack_iter(self):
a, b = 1, 2
self.assertEqual((a, b), (1, 2))
a, b, c = IteratingSequenceClass(3)
self.assertEqual((a, b, c), (0, 1, 2))
try: # too many values
a, b = IteratingSequenceClass(3)
except ValueError:
pass
else:
self.fail("should have raised ValueError")
try: # not enough values
a, b, c = IteratingSequenceClass(2)
except ValueError:
pass
else:
self.fail("should have raised ValueError")
try: # not iterable
a, b, c = len
except TypeError:
pass
else:
self.fail("should have raised TypeError")
a, b, c = {1: 42, 2: 42, 3: 42}.values()
self.assertEqual((a, b, c), (42, 42, 42))
f = open(TESTFN, "w")
lines = ("a\n", "bb\n", "ccc\n")
try:
for line in lines:
f.write(line)
finally:
f.close()
f = open(TESTFN, "r")
try:
a, b, c = f
self.assertEqual((a, b, c), lines)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
(a, b), (c,) = IteratingSequenceClass(2), {42: 24}
self.assertEqual((a, b, c), (0, 1, 42))
@cpython_only
def test_ref_counting_behavior(self):
class C(object):
count = 0
def __new__(cls):
cls.count += 1
return object.__new__(cls)
def __del__(self):
cls = self.__class__
assert cls.count > 0
cls.count -= 1
x = C()
self.assertEqual(C.count, 1)
del x
self.assertEqual(C.count, 0)
l = [C(), C(), C()]
self.assertEqual(C.count, 3)
try:
a, b = iter(l)
except ValueError:
pass
del l
self.assertEqual(C.count, 0)
# Make sure StopIteration is a "sink state".
# This tests various things that weren't sink states in Python 2.2.1,
# plus various things that always were fine.
def test_sinkstate_list(self):
# This used to fail
a = list(range(5))
b = iter(a)
self.assertEqual(list(b), list(range(5)))
a.extend(range(5, 10))
self.assertEqual(list(b), [])
def test_sinkstate_tuple(self):
a = (0, 1, 2, 3, 4)
b = iter(a)
self.assertEqual(list(b), list(range(5)))
self.assertEqual(list(b), [])
def test_sinkstate_string(self):
a = "abcde"
b = iter(a)
self.assertEqual(list(b), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(list(b), [])
def test_sinkstate_sequence(self):
# This used to fail
a = SequenceClass(5)
b = iter(a)
self.assertEqual(list(b), list(range(5)))
a.n = 10
self.assertEqual(list(b), [])
def test_sinkstate_callable(self):
# This used to fail
def spam(state=[0]):
i = state[0]
state[0] = i+1
if i == 10:
raise AssertionError("shouldn't have gotten this far")
return i
b = iter(spam, 5)
self.assertEqual(list(b), list(range(5)))
self.assertEqual(list(b), [])
def test_sinkstate_dict(self):
# XXX For a more thorough test, see towards the end of:
# http://mail.python.org/pipermail/python-dev/2002-July/026512.html
a = {1:1, 2:2, 0:0, 4:4, 3:3}
for b in iter(a), a.keys(), a.items(), a.values():
b = iter(a)
self.assertEqual(len(list(b)), 5)
self.assertEqual(list(b), [])
def test_sinkstate_yield(self):
def gen():
for i in range(5):
yield i
b = gen()
self.assertEqual(list(b), list(range(5)))
self.assertEqual(list(b), [])
def test_sinkstate_range(self):
a = range(5)
b = iter(a)
self.assertEqual(list(b), list(range(5)))
self.assertEqual(list(b), [])
def test_sinkstate_enumerate(self):
a = range(5)
e = enumerate(a)
b = iter(e)
self.assertEqual(list(b), list(zip(range(5), range(5))))
self.assertEqual(list(b), [])
def test_3720(self):
    # Avoid a crash, when an iterator deletes its next() method.
    # (bpo-3720: the for loop must raise TypeError rather than crash
    # when __next__ disappears from the class mid-iteration.)
    class BadIterator(object):
        def __iter__(self):
            return self

        def __next__(self):
            # Remove the class's own __next__ before the second step.
            del BadIterator.__next__
            return 1

    try:
        for i in BadIterator():
            pass
    except TypeError:
        pass
def test_extending_list_with_iterator_does_not_segfault(self):
# The code to extend a list with an iterator has a fair
# amount of nontrivial logic in terms of guessing how
# much memory to allocate in advance, "stealing" refs,
# and then shrinking at the end. This is a basic smoke
# test for that scenario.
def gen():
for i in range(500):
yield i
lst = [0] * 500
for i in range(240):
lst.pop(0)
lst.extend(gen())
self.assertEqual(len(lst), 760)
@cpython_only
def test_iter_overflow(self):
    """The sequence-iterator index must raise OverflowError past
    sys.maxsize instead of wrapping, and keep raising on further
    next() calls (issue 22939)."""
    it = iter(UnlimitedSequenceClass())
    # Manually set `it_index` to PY_SSIZE_T_MAX-2 without a loop
    it.__setstate__(sys.maxsize - 2)
    self.assertEqual(next(it), sys.maxsize - 2)
    self.assertEqual(next(it), sys.maxsize - 1)
    with self.assertRaises(OverflowError):
        next(it)
    # Check that Overflow error is always raised
    with self.assertRaises(OverflowError):
        next(it)
def test_iter_neg_setstate(self):
it = iter(UnlimitedSequenceClass())
it.__setstate__(-42)
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
def test_free_after_iterating(self):
check_free_after_iterating(self, iter, SequenceClass, (0,))
def test_error_iter(self):
for typ in (DefaultIterClass, NoIterClass):
self.assertRaises(TypeError, iter, typ())
def test_main():
    """regrtest entry point: run the whole TestCase suite."""
    run_unittest(TestCase)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
geobricks/pgeorest | pgeorest/rest/filesystem.py | 1 | 1062 | import json
from flask import Blueprint
from flask import Response
from flask.ext.cors import cross_origin
from pgeo.error.custom_exceptions import PGeoException
from pgeo.utils.filesystem import create_filesystem
filesystem = Blueprint('filesystem', __name__)
@filesystem.route('/')
@cross_origin(origins='*')
def index():
    """
    Welcome message for the filesystem blueprint's root endpoint.
    @return: plain-text welcome message
    """
    return 'Welcome to the Filesystem module!'
@filesystem.route('/<source>')
@filesystem.route('/<source>/')
@cross_origin(origins='*')
def create_filesystem_service(source):
    """
    This service creates the filesystem structure as specified in the
    configuration file.
    @param source: e.g. 'modis'
    @return: JSON response describing the result of the operation
    @raise PGeoException: with status 500 when the structure cannot be created
    """
    try:
        create_filesystem(source, {'product': 'Simone', 'year': '2014', 'day': '1'})
        response = {'status_code': 200, 'status_message': 'OK'}
        return Response(json.dumps(response), content_type='application/json; charset=utf-8')
    except PGeoException:
        # Domain errors already carry a message and status code: re-raise
        # unchanged instead of masking them behind a generic 'Error'.
        raise
    except Exception as e:
        # Previous bare `except:` swallowed the root cause (and even
        # KeyboardInterrupt/SystemExit); surface it in the error payload.
        raise PGeoException(str(e), status_code=500)
britcey/ansible | lib/ansible/modules/commands/expect.py | 3 | 7899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: expect
version_added: '2.0'
short_description: Executes a command and responds to prompts.
description:
- The C(expect) module executes a command and responds to prompts.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work.
options:
command:
description:
- The command module takes command to run.
required: true
creates:
description:
- A filename, when it already exists, this step will B(not) be run.
removes:
description:
- A filename, when it does not exist, this step will B(not) be run.
chdir:
description:
- Change into this directory before running the command.
responses:
description:
- Mapping of expected string/regex and string to respond with. If the
response is a list, successive matches return successive
responses. List functionality is new in 2.1.
required: true
timeout:
description:
- Amount of time in seconds to wait for the expected strings.
default: 30
echo:
description:
- Whether or not to echo out your response strings.
default: false
requirements:
- python >= 2.6
- pexpect >= 3.3
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else").
- The question, or key, under I(responses) is a python regex match. Case
insensitive searches are indicated with a prefix of C(?i).
- By default, if a question is encountered multiple times, its string
response will be repeated. If you need different responses for successive
question matches, instead of a string response, use a list of strings as
the response. The list functionality is new in 2.1.
- The M(expect) module is designed for simple scenarios. For more complex
needs, consider the use of expect code with the M(shell) or M(script)
modules. (An example is part of the M(shell) module documentation)
author: "Matt Martz (@sivel)"
'''
EXAMPLES = r'''
- name: Case insensitve password string match
expect:
command: passwd username
responses:
(?i)password: "MySekretPa$$word"
- name: Generic question with multiple different responses
expect:
command: /path/to/custom/command
responses:
Question:
- response1
- response2
- response3
'''
import datetime
import os
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
HAS_PEXPECT = False
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def response_closure(module, question, responses):
    """Build a pexpect event callback that replies with successive responses.

    Each time pexpect matches *question*, the returned callable yields the
    next entry from *responses* (newline-terminated). When the responses are
    exhausted the module fails with the last captured output.
    """
    answers = (u'%s\n' % to_text(entry).rstrip(u'\n') for entry in responses)

    def wrapped(info):
        try:
            return next(answers)
        except StopIteration:
            module.fail_json(msg="No remaining responses for '%s', "
                                 "output was '%s'" %
                                 (question,
                                  info['child_result_list'][-1]))

    return wrapped
def main():
    """Entry point of the Ansible ``expect`` module.

    Runs ``command`` under pexpect, answering interactive prompts from the
    ``responses`` mapping, and exits with the command's output and rc.
    """
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=True),
            chdir=dict(type='path'),
            creates=dict(type='path'),
            removes=dict(type='path'),
            responses=dict(type='dict', required=True),
            timeout=dict(type='int', default=30),
            echo=dict(type='bool', default=False),
        )
    )
    if not HAS_PEXPECT:
        module.fail_json(msg='The pexpect python module is required')
    chdir = module.params['chdir']
    args = module.params['command']
    creates = module.params['creates']
    removes = module.params['removes']
    responses = module.params['responses']
    timeout = module.params['timeout']
    echo = module.params['echo']
    # Build the pexpect ``events`` mapping: pattern -> reply string, or
    # pattern -> callable when a list requires successive distinct replies.
    events = dict()
    for key, value in responses.items():
        if isinstance(value, list):
            response = response_closure(module, key, value)
        else:
            response = u'%s\n' % to_text(value).rstrip(u'\n')
        events[to_text(key)] = response
    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")
    if chdir:
        chdir = os.path.abspath(chdir)
        os.chdir(chdir)
    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        if os.path.exists(creates):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % creates,
                changed=False,
                rc=0
            )
    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        if not os.path.exists(removes):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % removes,
                changed=False,
                rc=0
            )
    startd = datetime.datetime.now()
    try:
        try:
            # Prefer pexpect.run from pexpect>=4
            out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
                                  events=events, cwd=chdir, echo=echo,
                                  encoding='utf-8')
        except TypeError:
            # Use pexpect.runu in pexpect>=3.3,<4
            out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
                                   events=events, cwd=chdir, echo=echo)
    except (TypeError, AttributeError):
        e = get_exception()
        # This should catch all insufficient versions of pexpect
        # We deem them insufficient for their lack of ability to specify
        # to not echo responses via the run/runu functions, which would
        # potentially leak sensitive information
        module.fail_json(msg='Insufficient version of pexpect installed '
                             '(%s), this module requires pexpect>=3.3. '
                             'Error was %s' % (pexpect.__version__, e))
    except pexpect.ExceptionPexpect:
        e = get_exception()
        module.fail_json(msg='%s' % e)
    endd = datetime.datetime.now()
    delta = endd - startd
    if out is None:
        out = ''
    ret = dict(
        cmd=args,
        stdout=out.rstrip('\r\n'),
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )
    # pexpect returns rc=None when the command timed out instead of exiting.
    if rc is not None:
        module.exit_json(**ret)
    else:
        ret['msg'] = 'command exceeded timeout'
        module.fail_json(**ret)
# Standard Ansible module execution entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
htzy/bigfour | common/lib/xmodule/xmodule/modulestore/tests/django_utils.py | 21 | 16292 | # encoding: utf-8
"""
Modulestore configuration for test cases.
"""
import datetime
import pytz
from uuid import uuid4
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from request_cache.middleware import RequestCache
from courseware.field_overrides import OverrideFieldData # pylint: disable=import-error
from openedx.core.lib.tempdir import mkdtemp_clean
from xmodule.contentstore.django import _CONTENTSTORE
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.sample_courses import default_block_info_tree, TOY_BLOCK_INFO_TREE
from xmodule.modulestore.tests.factories import XMODULE_FACTORY_LOCK
class StoreConstructors(object):
    """Enumeration of the modulestore constructor types understood by
    `mixed_store_config`."""
    draft = 0
    split = 1
    xml = 2
def mixed_store_config(data_dir, mappings, include_xml=False, xml_source_dirs=None, store_order=None):
    """
    Return a `MixedModuleStore` configuration, which provides
    access to both Mongo- and XML-backed courses.

    Args:
        data_dir (string): the directory from which to load XML-backed courses.
        mappings (string): a dictionary mapping course IDs to modulestores, for example:

            {
                'MITx/2.01x/2013_Spring': 'xml',
                'edx/999/2013_Spring': 'default'
            }

        where 'xml' and 'default' are the two options provided by this configuration,
        mapping (respectively) to XML-backed and Mongo-backed modulestores.

    Keyword Args:
        include_xml (boolean): If True, include an XML modulestore in the configuration.
        xml_source_dirs (list): The directories containing XML courses to load from disk.
        store_order (list): `StoreConstructors` values giving the order in which the
            sub-stores are configured inside the mixed modulestore. Defaults to
            [draft, split]. The caller's list is never modified.

    note: For the courses to be loaded into the XML modulestore and accessible do the following:
        * include_xml should be True
        * xml_source_dirs should be the list of directories (relative to data_dir)
          containing the courses you want to load
        * mappings should be configured, pointing the xml courses to the xml modulestore
    """
    if store_order is None:
        store_order = [StoreConstructors.draft, StoreConstructors.split]
    else:
        # Copy so appending the XML store below never mutates the caller's list.
        store_order = list(store_order)
    if include_xml and StoreConstructors.xml not in store_order:
        store_order.append(StoreConstructors.xml)
    store_constructors = {
        StoreConstructors.split: split_mongo_store_config(data_dir)['default'],
        StoreConstructors.draft: draft_mongo_store_config(data_dir)['default'],
        StoreConstructors.xml: xml_store_config(data_dir, source_dirs=xml_source_dirs)['default'],
    }
    store = {
        'default': {
            'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
            'OPTIONS': {
                'mappings': mappings,
                'stores': [store_constructors[store] for store in store_order],
            }
        }
    }
    return store
def draft_mongo_store_config(data_dir):
    """
    Return a modulestore configuration whose default store is
    DraftMongoModuleStore.
    """
    options = {
        'default_class': 'xmodule.raw_module.RawDescriptor',
        'fs_root': data_dir,
        'render_template': 'edxmako.shortcuts.render_to_string'
    }
    # Random suffix keeps the Mongo collection name unique per configuration.
    collection_name = 'modulestore_{0}'.format(uuid4().hex[:5])
    return {
        'default': {
            'NAME': 'draft',
            'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
            'DOC_STORE_CONFIG': {
                'host': MONGO_HOST,
                'port': MONGO_PORT_NUM,
                'db': 'test_xmodule',
                'collection': collection_name,
            },
            'OPTIONS': options
        }
    }
def split_mongo_store_config(data_dir):
    """
    Return a modulestore configuration whose default store is the split
    (DraftVersioning) modulestore.
    """
    options = {
        'default_class': 'xmodule.raw_module.RawDescriptor',
        'fs_root': data_dir,
        'render_template': 'edxmako.shortcuts.render_to_string',
    }
    # Random suffix keeps the Mongo collection name unique per configuration.
    collection_name = 'modulestore_{0}'.format(uuid4().hex[:5])
    return {
        'default': {
            'NAME': 'draft',
            'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
            'DOC_STORE_CONFIG': {
                'host': MONGO_HOST,
                'port': MONGO_PORT_NUM,
                'db': 'test_xmodule',
                'collection': collection_name,
            },
            'OPTIONS': options
        }
    }
def xml_store_config(data_dir, source_dirs=None):
    """
    Return a modulestore configuration whose default store is XMLModuleStore.

    Pass the list of ``source_dirs`` you care about; otherwise every course
    found under ``data_dir`` will be processed.
    """
    return {
        'default': {
            'NAME': 'xml',
            'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
            'OPTIONS': {
                'data_dir': data_dir,
                'default_class': 'xmodule.hidden_module.HiddenDescriptor',
                'source_dirs': source_dirs,
            }
        }
    }
# Root directory that holds the XML test courses shipped with the platform.
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT

# This is an XML only modulestore with only the toy course loaded
TEST_DATA_XML_MODULESTORE = xml_store_config(TEST_DATA_DIR, source_dirs=['toy'])

# This modulestore will provide both a mixed mongo editable modulestore, and
# an XML store with just the toy course loaded.
TEST_DATA_MIXED_TOY_MODULESTORE = mixed_store_config(
    TEST_DATA_DIR, {'edX/toy/2012_Fall': 'xml', }, include_xml=True, xml_source_dirs=['toy']
)

# This modulestore will provide both a mixed mongo editable modulestore, and
# an XML store with common/test/data/2014 loaded, which is a course that is closed.
TEST_DATA_MIXED_CLOSED_MODULESTORE = mixed_store_config(
    TEST_DATA_DIR, {'edX/detached_pages/2014': 'xml', }, include_xml=True, xml_source_dirs=['2014']
)

# This modulestore will provide both a mixed mongo editable modulestore, and
# an XML store with common/test/data/graded loaded, which is a course that is graded.
TEST_DATA_MIXED_GRADED_MODULESTORE = mixed_store_config(
    TEST_DATA_DIR, {'edX/graded/2012_Fall': 'xml', }, include_xml=True, xml_source_dirs=['graded']
)

# All store requests now go through mixed
# Use this modulestore if you specifically want to test mongo and not a mocked modulestore.
# This modulestore definition below will not load any xml courses.
TEST_DATA_MONGO_MODULESTORE = mixed_store_config(mkdtemp_clean(), {}, include_xml=False)

# All store requests now go through mixed
# Use this modulestore if you specifically want to test split-mongo and not a mocked modulestore.
# This modulestore definition below will not load any xml courses.
TEST_DATA_SPLIT_MODULESTORE = mixed_store_config(
    mkdtemp_clean(),
    {},
    include_xml=False,
    store_order=[StoreConstructors.split, StoreConstructors.draft]
)
class ModuleStoreTestCase(TestCase):
    """
    Subclass for any test case that uses a ModuleStore.
    Ensures that the ModuleStore is cleaned before/after each test.

    Usage:
        1. Create a subclass of `ModuleStoreTestCase`
        2. (optional) If you need a specific variety of modulestore, or particular ModuleStore
           options, set the MODULESTORE class attribute of your test class to the
           appropriate modulestore config.
           For example:
               class FooTest(ModuleStoreTestCase):
                   MODULESTORE = mixed_store_config(data_dir, mappings)
                   # ...
        3. Use factories (e.g. `CourseFactory`, `ItemFactory`) to populate
           the modulestore with test data.

    NOTE:
        * For Mongo-backed courses (created with `CourseFactory`),
          the state of the course will be reset before/after each
          test method executes.
        * For XML-backed courses, the course state will NOT
          reset between test methods (although it will reset
          between test classes)
          The reason is: XML courses are not editable, so to reset
          a course you have to reload it from disk, which is slow.
          If you do need to reset an XML course, use
          `clear_existing_modulestores()` directly in
          your `setUp()` method.
    """
    # Default: a mixed (mongo-only) modulestore rooted in a throwaway temp dir.
    MODULESTORE = mixed_store_config(mkdtemp_clean(), {}, include_xml=False)

    def setUp(self, **kwargs):
        """
        Creates a test User if `create_user` is True.
        Returns the password for the test User.

        Args:
            create_user - specifies whether or not to create a test User. Default is True.
        """
        # Apply the class-level MODULESTORE config for the whole test and undo
        # it at cleanup time.
        settings_override = override_settings(MODULESTORE=self.MODULESTORE)
        settings_override.__enter__()
        self.addCleanup(settings_override.__exit__, None, None, None)
        # Clear out any existing modulestores,
        # which will cause them to be re-created
        clear_existing_modulestores()
        self.addCleanup(self.drop_mongo_collections)
        self.addCleanup(RequestCache().clear_request_cache)
        # Enable XModuleFactories for the space of this test (and its setUp).
        self.addCleanup(XMODULE_FACTORY_LOCK.disable)
        XMODULE_FACTORY_LOCK.enable()
        # When testing CCX, we should make sure that
        # OverrideFieldData.provider_classes is always reset to `None` so
        # that they're recalculated for every test
        OverrideFieldData.provider_classes = None
        super(ModuleStoreTestCase, self).setUp()
        self.store = modulestore()
        uname = 'testuser'
        email = 'test+courses@edx.org'
        password = 'foo'
        if kwargs.pop('create_user', True):
            # Create the user so we can log them in.
            self.user = User.objects.create_user(uname, email, password)
            # Note that we do not actually need to do anything
            # for registration if we directly mark them active.
            self.user.is_active = True
            # Staff has access to view all courses
            self.user.is_staff = True
            self.user.save()
        return password

    def create_non_staff_user(self):
        """
        Creates a non-staff test user.
        Returns the non-staff test user and its password.
        """
        uname = 'teststudent'
        password = 'foo'
        nonstaff_user = User.objects.create_user(uname, 'test+student@edx.org', password)
        # Note that we do not actually need to do anything
        # for registration if we directly mark them active.
        nonstaff_user.is_active = True
        nonstaff_user.is_staff = False
        nonstaff_user.save()
        return nonstaff_user, password

    def update_course(self, course, user_id):
        """
        Updates the version of course in the modulestore

        'course' is an instance of CourseDescriptor for which we want
        to update metadata.
        """
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
            self.store.update_item(course, user_id)
        # Re-fetch so the caller sees the persisted (possibly re-versioned) course.
        updated_course = self.store.get_course(course.id)
        return updated_course

    @staticmethod
    @patch('xmodule.modulestore.django.create_modulestore_instance')
    def drop_mongo_collections(mock_create):
        """
        If using a Mongo-backed modulestore & contentstore, drop the collections.
        """
        # Do not create the modulestore if it does not exist.
        mock_create.return_value = None
        module_store = modulestore()
        if hasattr(module_store, '_drop_database'):
            module_store._drop_database() # pylint: disable=protected-access
        _CONTENTSTORE.clear()
        if hasattr(module_store, 'close_connections'):
            module_store.close_connections()

    def create_sample_course(self, org, course, run, block_info_tree=None, course_fields=None):
        """
        create a course in the default modulestore from the collection of BlockInfo
        records defining the course tree

        Returns:
            course_loc: the CourseKey for the created course
        """
        if block_info_tree is None:
            block_info_tree = default_block_info_tree
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, None):
            course = self.store.create_course(org, course, run, self.user.id, fields=course_fields)
            self.course_loc = course.location # pylint: disable=attribute-defined-outside-init

            def create_sub_tree(parent_loc, block_info):
                """Recursively creates a sub_tree on this parent_loc with this block."""
                block = self.store.create_child(
                    self.user.id,
                    # TODO remove version_agnostic() when we impl the single transaction
                    parent_loc.version_agnostic(),
                    block_info.category, block_id=block_info.block_id,
                    fields=block_info.fields,
                )
                for tree in block_info.sub_tree:
                    create_sub_tree(block.location, tree)
                # Expose each created block's location as an attribute on the test case.
                setattr(self, block_info.block_id, block.location.version_agnostic())

            for tree in block_info_tree:
                create_sub_tree(self.course_loc, tree)
            # remove version_agnostic when bulk write works
            self.store.publish(self.course_loc.version_agnostic(), self.user.id)
        return self.course_loc.course_key.version_agnostic()

    def create_toy_course(self, org='edX', course='toy', run='2012_Fall'):
        """
        Create an equivalent to the toy xml course
        """
        # with self.store.bulk_operations(self.store.make_course_key(org, course, run)):
        self.toy_loc = self.create_sample_course( # pylint: disable=attribute-defined-outside-init
            org, course, run, TOY_BLOCK_INFO_TREE,
            {
                "textbooks": [["Textbook", "https://s3.amazonaws.com/edx-textbooks/guttag_computation_v3/"]],
                "wiki_slug": "toy",
                "display_name": "Toy Course",
                "graded": True,
                "discussion_topics": {"General": {"id": "i4x-edX-toy-course-2012_Fall"}},
                "graceperiod": datetime.timedelta(days=2, seconds=21599),
                "start": datetime.datetime(2015, 07, 17, 12, tzinfo=pytz.utc),
                "xml_attributes": {"filename": ["course/2012_Fall.xml", "course/2012_Fall.xml"]},
                "pdf_textbooks": [
                    {
                        "tab_title": "Sample Multi Chapter Textbook",
                        "id": "MyTextbook",
                        "chapters": [
                            {"url": "/static/Chapter1.pdf", "title": "Chapter 1"},
                            {"url": "/static/Chapter2.pdf", "title": "Chapter 2"}
                        ]
                    }
                ],
                "course_image": "just_a_test.jpg",
            }
        )
        # Add the "about" blocks, handouts and static tabs that the toy XML
        # course ships with, so both variants look alike to tests.
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.toy_loc):
            self.store.create_item(
                self.user.id, self.toy_loc, "about", block_id="short_description",
                fields={"data": "A course about toys."}
            )
            self.store.create_item(
                self.user.id, self.toy_loc, "about", block_id="effort",
                fields={"data": "6 hours"}
            )
            self.store.create_item(
                self.user.id, self.toy_loc, "about", block_id="end_date",
                fields={"data": "TBD"}
            )
            self.store.create_item(
                self.user.id, self.toy_loc, "course_info", "handouts",
                fields={"data": "<a href='/static/handouts/sample_handout.txt'>Sample</a>"}
            )
            self.store.create_item(
                self.user.id, self.toy_loc, "static_tab", "resources",
                fields={"display_name": "Resources"},
            )
            self.store.create_item(
                self.user.id, self.toy_loc, "static_tab", "syllabus",
                fields={"display_name": "Syllabus"},
            )
        return self.toy_loc
| agpl-3.0 |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/pytz/__init__.py | 69 | 30648 | '''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The Olson database has historically been updated about 4 times a year
OLSON_VERSION = '2008c'
VERSION = OLSON_VERSION
#VERSION = OLSON_VERSION + '.2'
__version__ = OLSON_VERSION

OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling

# Names exported by `from pytz import *`.
__all__ = [
    'timezone', 'utc', 'country_timezones',
    'AmbiguousTimeError', 'UnknownTimeZoneError',
    'all_timezones', 'all_timezones_set',
    'common_timezones', 'common_timezones_set',
    ]
import sys, datetime, os.path, gettext
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from tzinfo import AmbiguousTimeError, unpickler
from tzfile import build_tzinfo
# Use 2.3 sets module implementation if set builtin is not available
try:
set
except NameError:
from sets import Set as set
def open_resource(name):
    """Return a binary stream for *name* inside the zoneinfo subdir.

    Delegates to pkg_resources when it is importable; otherwise opens the
    file relative to this module on disk.
    """
    if resource_stream is not None:
        return resource_stream(__name__, 'zoneinfo/' + name)

    segments = name.lstrip('/').split('/')
    # Reject path traversal or embedded separators in any segment.
    for segment in segments:
        if segment == os.path.pardir or os.path.sep in segment:
            raise ValueError('Bad path segment: %r' % segment)
    zoneinfo_dir = os.path.join(os.path.dirname(__file__), 'zoneinfo')
    return open(os.path.join(zoneinfo_dir, *segments), 'rb')
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True

    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.

    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    pass  # behavior is fully inherited from KeyError
# Cache of zone name -> tzinfo instance, populated lazily by timezone().
_tzinfo_cache = {}
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(u'US/Eastern') is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> timezone('Asia/Shangri-La')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: 'Asia/Shangri-La'
    >>> timezone(u'\N{TRADE MARK SIGN}')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: u'\u2122'
    '''
    # 'UTC' is special-cased to the module-level singleton.
    if zone.upper() == 'UTC':
        return utc
    try:
        zone = zone.encode('US-ASCII')
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    # Undo '+'/'-' munging performed by older pytz releases.
    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            # Build the tzinfo lazily from the bundled zoneinfo data and
            # cache it so repeated lookups return the same instance.
            _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]
def _unmunge_zone(zone):
    """Reverse the '+' / '-' name munging done by older pytz releases."""
    unmunged = zone.replace('_plus_', '+')
    return unmunged.replace('_minus_', '-')
# Reusable timedelta constants shared by the tzinfo implementations below.
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
    """UTC

    A tzinfo for Coordinated Universal Time. Identical to the reference UTC
    implementation given in the Python docs, except that it unpickles via
    the single module-global instance bound just after this class statement.

    Extra attributes and methods are provided to match the API of the other
    pytz tzinfo instances.
    """
    zone = "UTC"

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.replace(tzinfo=self)

    def __reduce__(self):
        # Unpickle through the module-level factory so every unpickled UTC
        # is the same singleton.
        return _UTC, ()

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"
# Replace the class with its singleton instance; both names alias it.
UTC = utc = UTC() # UTC is a singleton
def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p), len(naive_p), len(p) - len(naive_p)
    (60, 43, 17)
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True
    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    return utc
# Tell (old) pickle protocols that calling _UTC during unpickling is safe.
_UTC.__safe_for_unpickling__ = True
def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    return unpickler(*args)
# Tell (old) pickle protocols that calling _p during unpickling is safe.
_p.__safe_for_unpickling__ = True
# Lazily-populated map of ISO 3166 country code -> list of zone names.
_country_timezones_cache = {}
def country_timezones(iso3166_code):
    """Return a list of timezones used in a particular country.

    iso3166_code is the two letter code used to identify the country.

    >>> country_timezones('ch')
    ['Europe/Zurich']
    >>> country_timezones('CH')
    ['Europe/Zurich']
    >>> country_timezones(u'ch')
    ['Europe/Zurich']
    >>> country_timezones('XXX')
    Traceback (most recent call last):
    ...
    KeyError: 'XXX'
    """
    iso3166_code = iso3166_code.upper()
    if not _country_timezones_cache:
        # First call: parse the bundled zone.tab once and cache the zones
        # for every country.
        for record in open_resource('zone.tab'):
            if record.startswith('#'):
                continue
            code, _coordinates, zone = record.split(None, 4)[:3]
            _country_timezones_cache.setdefault(code, []).append(zone)
    return _country_timezones_cache[iso3166_code]
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
    """tzinfo with a fixed offset, expressed in whole minutes from UTC."""

    zone = None # to match the standard pytz API

    def __init__(self, minutes):
        # tzinfo cannot represent offsets of a full day or more.
        if abs(minutes) >= 1440:
            raise ValueError("absolute offset is too large", minutes)
        self._minutes = minutes
        self._offset = datetime.timedelta(minutes=minutes)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        return None

    def tzname(self, dt):
        return None

    def __reduce__(self):
        # Unpickle through the FixedOffset factory so the per-offset
        # singleton is reused.
        return FixedOffset, (self._minutes, )

    def __repr__(self):
        return 'pytz.FixedOffset(%d)' % self._minutes

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.replace(tzinfo=self)
def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

    >>> one = FixedOffset(-330)
    >>> one
    pytz.FixedOffset(-330)
    >>> one.utcoffset(datetime.datetime.now())
    datetime.timedelta(-1, 66600)

    >>> two = FixedOffset(1380)
    >>> two
    pytz.FixedOffset(1380)
    >>> two.utcoffset(datetime.datetime.now())
    datetime.timedelta(0, 82800)

    The datetime.timedelta must be between the range of -1 and 1 day,
    non-inclusive.

    >>> FixedOffset(1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', 1440)

    >>> FixedOffset(-1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

    >>> FixedOffset(0) is UTC
    True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

    >>> FixedOffset(-330) is one
    True
    >>> FixedOffset(1380) is two
    True

    It should also be true for pickling.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(one)) is one
    True
    >>> pickle.loads(pickle.dumps(two)) is two
    True
    """
    # NOTE: the mutable default for `_tzinfos` is deliberate -- it acts as a
    # function-level cache shared across calls, keyed by the minute offset.
    if offset == 0:
        return UTC
    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before; we need to save it.
        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))
    return info
# Tell (old) pickle protocols that calling FixedOffset during unpickling is safe.
FixedOffset.__safe_for_unpickling__ = True
def _test():
    """Run the doctests embedded throughout the pytz package."""
    import doctest
    import os
    import sys
    # Make the package importable when run from inside the source tree.
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)
# Run the doctest suite when this module is executed directly.
if __name__ == '__main__':
    _test()
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Knox_IN',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Katmandu',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC']
common_timezones_set = set(common_timezones)
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Katmandu',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu',
'posixrules']
all_timezones_set = set(all_timezones)
| agpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/subunit/test_results.py | 2 | 25578 | #
# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""TestResult helper classes used to by subunit."""
import csv
import datetime
import testtools
from testtools.compat import all
from testtools.content import (
text_content,
TracebackContent,
)
from testtools import StreamResult
from subunit import iso8601
import subunit
# NOT a TestResult, because we are implementing the interface, not inheriting
# it.
class TestResultDecorator(object):
    """General pass-through decorator.

    This provides a base that other TestResults can inherit from to
    gain basic forwarding functionality. It also takes care of
    handling the case where the target doesn't support newer methods
    or features by degrading them.
    """

    # XXX: Since lp:testtools r250, this is in testtools. Once it's released,
    # we should gut this and just use that.

    def __init__(self, decorated):
        """Create a TestResultDecorator forwarding to decorated."""
        # Make every decorator degrade gracefully: the testtools wrapper
        # adapts extended-API calls for targets that only speak the
        # original unittest TestResult protocol.
        self.decorated = testtools.ExtendedToOriginalDecorator(decorated)

    def startTest(self, test):
        return self.decorated.startTest(test)

    def startTestRun(self):
        return self.decorated.startTestRun()

    def stopTest(self, test):
        return self.decorated.stopTest(test)

    def stopTestRun(self):
        return self.decorated.stopTestRun()

    def addError(self, test, err=None, details=None):
        return self.decorated.addError(test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        return self.decorated.addFailure(test, err, details=details)

    def addSuccess(self, test, details=None):
        return self.decorated.addSuccess(test, details=details)

    def addSkip(self, test, reason=None, details=None):
        return self.decorated.addSkip(test, reason, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        return self.decorated.addExpectedFailure(test, err, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        return self.decorated.addUnexpectedSuccess(test, details=details)

    def _get_failfast(self):
        # 'failfast' may be missing on older decorated results, so
        # default to False rather than raising AttributeError.
        return getattr(self.decorated, 'failfast', False)

    def _set_failfast(self, value):
        self.decorated.failfast = value
    failfast = property(_get_failfast, _set_failfast)

    def progress(self, offset, whence):
        return self.decorated.progress(offset, whence)

    def wasSuccessful(self):
        return self.decorated.wasSuccessful()

    @property
    def shouldStop(self):
        return self.decorated.shouldStop

    def stop(self):
        return self.decorated.stop()

    @property
    def testsRun(self):
        return self.decorated.testsRun

    def tags(self, new_tags, gone_tags):
        return self.decorated.tags(new_tags, gone_tags)

    def time(self, a_datetime):
        return self.decorated.time(a_datetime)
class HookedTestResultDecorator(TestResultDecorator):
    """A TestResult which calls a hook on every event.

    Subclasses supply ``_before_event``; it is invoked immediately
    before each event is forwarded to the decorated result.
    """

    def __init__(self, decorated):
        # Cache the bound super() proxy once; every forwarding method
        # below upcalls through it.
        self.super = super(HookedTestResultDecorator, self)
        self.super.__init__(decorated)

    def startTest(self, test):
        self._before_event()
        return self.super.startTest(test)

    def startTestRun(self):
        self._before_event()
        return self.super.startTestRun()

    def stopTest(self, test):
        self._before_event()
        return self.super.stopTest(test)

    def stopTestRun(self):
        self._before_event()
        return self.super.stopTestRun()

    def addError(self, test, err=None, details=None):
        self._before_event()
        return self.super.addError(test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        self._before_event()
        return self.super.addFailure(test, err, details=details)

    def addSuccess(self, test, details=None):
        self._before_event()
        return self.super.addSuccess(test, details=details)

    def addSkip(self, test, reason=None, details=None):
        self._before_event()
        return self.super.addSkip(test, reason, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        self._before_event()
        return self.super.addExpectedFailure(test, err, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        self._before_event()
        return self.super.addUnexpectedSuccess(test, details=details)

    def progress(self, offset, whence):
        self._before_event()
        return self.super.progress(offset, whence)

    def wasSuccessful(self):
        self._before_event()
        return self.super.wasSuccessful()

    @property
    def shouldStop(self):
        # NOTE: even this property access triggers the hook.
        self._before_event()
        return self.super.shouldStop

    def stop(self):
        self._before_event()
        return self.super.stop()

    def time(self, a_datetime):
        self._before_event()
        return self.super.time(a_datetime)
class AutoTimingTestResultDecorator(HookedTestResultDecorator):
    """Decorate a TestResult to add time events to a test run.

    By default a time event is emitted before every test event, but as
    soon as the stream supplies explicit time data (via :meth:`time`),
    this decorator turns itself off to prevent causing confusion.
    """

    def __init__(self, decorated):
        self._time = None
        super(AutoTimingTestResultDecorator, self).__init__(decorated)

    def _before_event(self):
        # Once an explicit timestamp has been supplied (self._time is
        # set), automatic timestamps are suppressed.
        if self._time is not None:
            return
        now = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
        self.decorated.time(now)

    def progress(self, offset, whence):
        return self.decorated.progress(offset, whence)

    @property
    def shouldStop(self):
        return self.decorated.shouldStop

    def time(self, a_datetime):
        """Provide a timestamp for the current test activity.

        :param a_datetime: If None, automatically add timestamps before every
            event (this is the default behaviour if time() is not called at
            all). If not None, pass the provided time onto the decorated
            result object and disable automatic timestamps.
        """
        self._time = a_datetime
        return self.decorated.time(a_datetime)
class TagsMixin(object):
    """Track tag state at two scopes: the whole run and the current test.

    Each scope is a pair of sets ``(added, removed)``.  While a test is
    running, tag instructions land in the test scope so they do not leak
    into subsequent tests.
    """

    def __init__(self):
        self._clear_tags()

    def _clear_tags(self):
        self._global_tags = set(), set()
        self._test_tags = None

    def _get_active_tags(self):
        """Return the set of tags currently in force."""
        outer_new, outer_gone = self._global_tags
        if self._test_tags is None:
            return set(outer_new)
        inner_new, inner_gone = self._test_tags
        return (outer_new - inner_gone) | inner_new

    def _get_current_scope(self):
        # Inside a test the per-test scope wins; otherwise use the
        # run-wide scope.
        return self._test_tags if self._test_tags else self._global_tags

    def _flush_current_scope(self, tag_receiver):
        """Send any pending tag changes to tag_receiver and reset the scope."""
        added, removed = self._get_current_scope()
        if added or removed:
            tag_receiver.tags(added, removed)
        if self._test_tags:
            self._test_tags = set(), set()
        else:
            self._global_tags = set(), set()

    def startTestRun(self):
        self._clear_tags()

    def startTest(self, test):
        self._test_tags = set(), set()

    def stopTest(self, test):
        self._test_tags = None

    def tags(self, new_tags, gone_tags):
        """Handle tag instructions.

        Adds and removes tags as appropriate. If a test is currently running,
        tags are not affected for subsequent tests.

        :param new_tags: Tags to add,
        :param gone_tags: Tags to remove.
        """
        added, removed = self._get_current_scope()
        # A tag named in both directions ends up in neither set.
        removed.update(gone_tags)
        removed.difference_update(new_tags)
        added.update(new_tags)
        added.difference_update(gone_tags)
class TagCollapsingDecorator(HookedTestResultDecorator, TagsMixin):
    """Collapses many 'tags' calls into one where possible.

    Tag changes accumulate (via TagsMixin) until the next non-tag event,
    at which point the combined delta is forwarded downstream once.
    """

    def __init__(self, result):
        super(TagCollapsingDecorator, self).__init__(result)
        self._clear_tags()

    def _before_event(self):
        # Invoked by HookedTestResultDecorator before each forwarded
        # event: push any buffered tag changes to the decorated result.
        self._flush_current_scope(self.decorated)

    def tags(self, new_tags, gone_tags):
        # Buffer the change in the mixin's scopes instead of forwarding
        # it immediately.
        TagsMixin.tags(self, new_tags, gone_tags)
class TimeCollapsingDecorator(HookedTestResultDecorator):
    """Only pass on the first and last of a consecutive sequence of times."""

    def __init__(self, decorated):
        super(TimeCollapsingDecorator, self).__init__(decorated)
        self._last_received_time = None
        self._last_sent_time = None

    def _before_event(self):
        # A non-time event ends any run of time() calls: forward the
        # held-back final timestamp (if it differs from what was last
        # sent) and reset the buffer.
        pending = self._last_received_time
        if pending is None:
            return
        if pending != self._last_sent_time:
            self.decorated.time(pending)
            self._last_sent_time = pending
        self._last_received_time = None

    def time(self, a_time):
        # Don't upcall, because we don't want to call _before_event, it's only
        # for non-time events.  The first timestamp of a run is forwarded
        # immediately; subsequent ones are buffered until _before_event.
        if self._last_received_time is None:
            self.decorated.time(a_time)
            self._last_sent_time = a_time
        self._last_received_time = a_time
def and_predicates(predicates):
    """Return a predicate that is true iff all predicates are true."""
    # XXX: Should probably be in testtools to be better used by matchers. jml
    def _all_hold(*args, **kwargs):
        # Short-circuits on the first predicate that fails, like all().
        for predicate in predicates:
            if not predicate(*args, **kwargs):
                return False
        return True
    return _all_hold
def make_tag_filter(with_tags, without_tags):
    """Make a callback that checks tests against tags.

    :param with_tags: Iterable of tags every kept test must carry, or
        None/empty for no requirement.
    :param without_tags: Iterable of tags that disqualify a test, or
        None/empty to exclude nothing.
    :return: A callable of (test, outcome, err, details, tags) returning
        True if the test's tags pass the filter; suitable as a
        filter_predicate for TestResultFilter.
    """
    # Normalise to sets once, up front.  The conditional expressions
    # replace the old error-prone ``x and set(x) or None`` idiom while
    # keeping the same results (None/empty input -> no constraint).
    with_tags = set(with_tags) if with_tags else None
    without_tags = set(without_tags) if without_tags else None

    def check_tags(test, outcome, err, details, tags):
        # All required tags must be present...
        if with_tags and not with_tags <= tags:
            return False
        # ...and no excluded tag may be present.
        if without_tags and bool(without_tags & tags):
            return False
        return True
    return check_tags
class _PredicateFilter(TestResultDecorator, TagsMixin):
    """Buffer per-test events and replay them only if a predicate passes.

    All calls for a test are held in ``_buffered_calls`` until
    ``stopTest``; if any outcome for the test failed the predicate the
    entire buffered sequence for that test is dropped.
    """

    def __init__(self, result, predicate):
        """Create a filter forwarding kept tests to ``result``.

        :param predicate: Callable of (test, outcome, error, details,
            tags) returning True to keep the test.
        """
        super(_PredicateFilter, self).__init__(result)
        self._clear_tags()
        # Collapse redundant time/tags chatter before it reaches result.
        self.decorated = TimeCollapsingDecorator(
            TagCollapsingDecorator(self.decorated))
        self._predicate = predicate
        # The current test (for filtering tags)
        self._current_test = None
        # Has the current test been filtered (for outputting test tags)
        self._current_test_filtered = None
        # Calls to this result that we don't know whether to forward on yet.
        self._buffered_calls = []

    def filter_predicate(self, test, outcome, error, details):
        # Evaluate the user predicate with the tags active right now.
        return self._predicate(
            test, outcome, error, details, self._get_active_tags())

    def addError(self, test, err=None, details=None):
        if (self.filter_predicate(test, 'error', err, details)):
            self._buffered_calls.append(
                ('addError', [test, err], {'details': details}))
        else:
            self._filtered()

    def addFailure(self, test, err=None, details=None):
        if (self.filter_predicate(test, 'failure', err, details)):
            self._buffered_calls.append(
                ('addFailure', [test, err], {'details': details}))
        else:
            self._filtered()

    def addSkip(self, test, reason=None, details=None):
        if (self.filter_predicate(test, 'skip', reason, details)):
            self._buffered_calls.append(
                ('addSkip', [test, reason], {'details': details}))
        else:
            self._filtered()

    def addExpectedFailure(self, test, err=None, details=None):
        if self.filter_predicate(test, 'expectedfailure', err, details):
            self._buffered_calls.append(
                ('addExpectedFailure', [test, err], {'details': details}))
        else:
            self._filtered()

    def addUnexpectedSuccess(self, test, details=None):
        # Note: unexpected successes are buffered without consulting the
        # predicate, so they are never filtered out here.
        self._buffered_calls.append(
            ('addUnexpectedSuccess', [test], {'details': details}))

    def addSuccess(self, test, details=None):
        if (self.filter_predicate(test, 'success', None, details)):
            self._buffered_calls.append(
                ('addSuccess', [test], {'details': details}))
        else:
            self._filtered()

    def _filtered(self):
        # Mark the current test as dropped; stopTest will discard its
        # buffered calls instead of replaying them.
        self._current_test_filtered = True

    def startTest(self, test):
        """Start a test.

        Not directly passed to the client, but used for handling of tags
        correctly.
        """
        TagsMixin.startTest(self, test)
        self._current_test = test
        self._current_test_filtered = False
        self._buffered_calls.append(('startTest', [test], {}))

    def stopTest(self, test):
        """Stop a test.

        Not directly passed to the client, but used for handling of tags
        correctly.
        """
        if not self._current_test_filtered:
            # Kept: replay everything buffered for this test in order.
            for method, args, kwargs in self._buffered_calls:
                getattr(self.decorated, method)(*args, **kwargs)
            self.decorated.stopTest(test)
        self._current_test = None
        self._current_test_filtered = None
        self._buffered_calls = []
        TagsMixin.stopTest(self, test)

    def tags(self, new_tags, gone_tags):
        TagsMixin.tags(self, new_tags, gone_tags)
        if self._current_test is not None:
            # Inside a test: buffer alongside the other calls so tags
            # are only emitted if the test is kept.
            self._buffered_calls.append(('tags', [new_tags, gone_tags], {}))
        else:
            return super(_PredicateFilter, self).tags(new_tags, gone_tags)

    def time(self, a_time):
        return self.decorated.time(a_time)

    def id_to_orig_id(self, id):
        # Strip the prefix added when a remote test is reconstituted
        # locally, recovering the original test id.
        if id.startswith("subunit.RemotedTestCase."):
            return id[len("subunit.RemotedTestCase."):]
        return id
class TestResultFilter(TestResultDecorator):
    """A pyunit TestResult interface implementation which filters tests.

    Tests that pass the filter are handed on to another TestResult instance
    for further processing/reporting. To obtain the filtered results,
    the other instance must be interrogated.

    :ivar result: The result that tests are passed to after filtering.
    :ivar filter_predicate: The callback run to decide whether to pass
        a result.
    """

    def __init__(self, result, filter_error=False, filter_failure=False,
                 filter_success=True, filter_skip=False, filter_xfail=False,
                 filter_predicate=None, fixup_expected_failures=None):
        """Create a FilterResult object filtering to result.

        :param filter_error: Filter out errors.
        :param filter_failure: Filter out failures.
        :param filter_success: Filter out successful tests.
        :param filter_skip: Filter out skipped tests.
        :param filter_xfail: Filter out expected failure tests.
        :param filter_predicate: A callable taking (test, outcome, err,
            details, tags) and returning True if the result should be passed
            through. err and details may be none if no error or extra
            metadata is available. outcome is the name of the outcome such
            as 'success' or 'failure'. tags is new in 0.0.8; 0.0.7 filters
            are still supported but should be updated to accept the tags
            parameter for efficiency.
        :param fixup_expected_failures: Set of test ids to consider known
            failing.
        """
        # Build one small predicate per enabled flag; a test is kept only
        # if every predicate accepts it (see and_predicates).
        predicates = []
        if filter_error:
            predicates.append(
                lambda t, outcome, e, d, tags: outcome != 'error')
        if filter_failure:
            predicates.append(
                lambda t, outcome, e, d, tags: outcome != 'failure')
        if filter_success:
            predicates.append(
                lambda t, outcome, e, d, tags: outcome != 'success')
        if filter_skip:
            predicates.append(
                lambda t, outcome, e, d, tags: outcome != 'skip')
        if filter_xfail:
            predicates.append(
                lambda t, outcome, e, d, tags: outcome != 'expectedfailure')
        if filter_predicate is not None:
            def compat(test, outcome, error, details, tags):
                # 0.0.7 and earlier did not support the 'tags' parameter.
                try:
                    return filter_predicate(
                        test, outcome, error, details, tags)
                except TypeError:
                    return filter_predicate(test, outcome, error, details)
            predicates.append(compat)
        predicate = and_predicates(predicates)
        # The buffering/replay mechanics live in _PredicateFilter.
        super(TestResultFilter, self).__init__(
            _PredicateFilter(result, predicate))
        if fixup_expected_failures is None:
            self._fixup_expected_failures = frozenset()
        else:
            self._fixup_expected_failures = fixup_expected_failures

    def addError(self, test, err=None, details=None):
        # Errors from known-failing tests are recast as expected failures
        # before the filter sees them.
        if self._failure_expected(test):
            self.addExpectedFailure(test, err=err, details=details)
        else:
            super(TestResultFilter, self).addError(
                test, err=err, details=details)

    def addFailure(self, test, err=None, details=None):
        if self._failure_expected(test):
            self.addExpectedFailure(test, err=err, details=details)
        else:
            super(TestResultFilter, self).addFailure(
                test, err=err, details=details)

    def addSuccess(self, test, details=None):
        # A pass from a known-failing test is an unexpected success.
        if self._failure_expected(test):
            self.addUnexpectedSuccess(test, details=details)
        else:
            super(TestResultFilter, self).addSuccess(test, details=details)

    def _failure_expected(self, test):
        # True when the test id was listed in fixup_expected_failures.
        return (test.id() in self._fixup_expected_failures)
class TestIdPrintingResult(testtools.TestResult):
    """Print test ids to a stream.

    Implements both TestResult and StreamResult, for compatibility.
    """

    def __init__(self, stream, show_times=False, show_exists=False):
        """Create a FilterResult object outputting to stream.

        :param stream: Text stream to write test ids to.
        :param show_times: If True, append each test's duration (seconds).
        :param show_exists: If True, also report 'exists' status events.
        """
        super(TestIdPrintingResult, self).__init__()
        self._stream = stream
        self.show_exists = show_exists
        self.show_times = show_times

    def startTestRun(self):
        self.failed_tests = 0
        self.__time = None
        self._test = None
        self._test_duration = 0
        # Maps in-progress test ids to the timestamp they started at.
        self._active_tests = {}

    def addError(self, test, err):
        self.failed_tests += 1
        self._test = test

    def addFailure(self, test, err):
        self.failed_tests += 1
        self._test = test

    def addSuccess(self, test):
        self._test = test

    def addSkip(self, test, reason=None, details=None):
        self._test = test

    def addUnexpectedSuccess(self, test, details=None):
        self.failed_tests += 1
        self._test = test

    def addExpectedFailure(self, test, err=None, details=None):
        self._test = test

    def reportTest(self, test_id, duration):
        """Write one test id, optionally followed by its duration.

        :param duration: A datetime.timedelta, or a bare number of seconds
            (0 is passed when no timing data is available).
        """
        if self.show_times:
            if isinstance(duration, datetime.timedelta):
                seconds = duration.seconds
                seconds += duration.days * 3600 * 24
                seconds += duration.microseconds / 1000000.0
            else:
                # BUG FIX: 'exists' events and tests without a recorded
                # start pass a plain 0 (or None); the previous code
                # raised AttributeError on duration.seconds here.
                seconds = float(duration or 0)
            self._stream.write(test_id + ' %0.3f\n' % seconds)
        else:
            self._stream.write(test_id + '\n')

    def startTest(self, test):
        self._start_time = self._time()

    def status(self, test_id=None, test_status=None, test_tags=None,
               runnable=True, file_name=None, file_bytes=None, eof=False,
               mime_type=None, route_code=None, timestamp=None):
        """StreamResult event entry point."""
        if not test_id:
            return
        if timestamp is not None:
            self.time(timestamp)
        if test_status == 'exists':
            if self.show_exists:
                self.reportTest(test_id, 0)
        elif test_status in ('inprogress', None):
            self._active_tests[test_id] = self._time()
        else:
            # Any terminal status ends the test.
            self._end_test(test_id)

    def _end_test(self, test_id):
        test_start = self._active_tests.pop(test_id, None)
        if not test_start:
            test_duration = 0
        else:
            test_duration = self._time() - test_start
        self.reportTest(test_id, test_duration)

    def stopTest(self, test):
        test_duration = self._time() - self._start_time
        self.reportTest(self._test.id(), test_duration)

    def time(self, time):
        self.__time = time

    def _time(self):
        return self.__time

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return self.failed_tests == 0

    def stopTestRun(self):
        # Flush any tests still marked in-progress at end of run.
        for test_id in list(self._active_tests.keys()):
            self._end_test(test_id)
class TestByTestResult(testtools.TestResult):
    """Call something every time a test completes."""

    # XXX: In testtools since lp:testtools r249. Once that's released, just
    # import that.

    def __init__(self, on_test):
        """Construct a ``TestByTestResult``.

        :param on_test: A callable that take a test case, a status (one of
            "success", "failure", "error", "skip", or "xfail"), a start time
            (a ``datetime`` with timezone), a stop time, an iterable of tags,
            and a details dict. Is called at the end of each test (i.e. on
            ``stopTest``) with the accumulated values for that test.
        """
        super(TestByTestResult, self).__init__()
        self._on_test = on_test

    def startTest(self, test):
        super(TestByTestResult, self).startTest(test)
        self._start_time = self._now()
        # There's no supported (i.e. tested) behaviour that relies on these
        # being set, but it makes me more comfortable all the same. -- jml
        self._status = None
        self._details = None
        self._stop_time = None

    def stopTest(self, test):
        self._stop_time = self._now()
        super(TestByTestResult, self).stopTest(test)
        # Report the accumulated outcome for this test in a single call.
        self._on_test(
            test=test,
            status=self._status,
            start_time=self._start_time,
            stop_time=self._stop_time,
            # current_tags is new in testtools 0.9.13.
            tags=getattr(self, 'current_tags', None),
            details=self._details)

    def _err_to_details(self, test, err, details):
        # Prefer explicit details; otherwise synthesise a traceback
        # content object from the exc_info tuple.
        if details:
            return details
        return {'traceback': TracebackContent(err, test)}

    def addSuccess(self, test, details=None):
        super(TestByTestResult, self).addSuccess(test)
        self._status = 'success'
        self._details = details

    def addFailure(self, test, err=None, details=None):
        super(TestByTestResult, self).addFailure(test, err, details)
        self._status = 'failure'
        self._details = self._err_to_details(test, err, details)

    def addError(self, test, err=None, details=None):
        super(TestByTestResult, self).addError(test, err, details)
        self._status = 'error'
        self._details = self._err_to_details(test, err, details)

    def addSkip(self, test, reason=None, details=None):
        super(TestByTestResult, self).addSkip(test, reason, details)
        self._status = 'skip'
        if details is None:
            details = {'reason': text_content(reason)}
        elif reason:
            # XXX: What if details already has 'reason' key?
            details['reason'] = text_content(reason)
        self._details = details

    def addExpectedFailure(self, test, err=None, details=None):
        super(TestByTestResult, self).addExpectedFailure(test, err, details)
        self._status = 'xfail'
        self._details = self._err_to_details(test, err, details)

    def addUnexpectedSuccess(self, test, details=None):
        super(TestByTestResult, self).addUnexpectedSuccess(test, details)
        # NOTE: recorded as 'success' (not a distinct status); callers
        # cannot distinguish it via on_test.
        self._status = 'success'
        self._details = details
class CsvResult(TestByTestResult):
    """Stream completed tests as CSV rows: id, status, start, stop."""

    def __init__(self, stream):
        super(CsvResult, self).__init__(self._on_test)
        self._write_row = csv.writer(stream).writerow

    def startTestRun(self):
        # Emit the header row before any per-test rows.
        super(CsvResult, self).startTestRun()
        self._write_row(['test', 'status', 'start_time', 'stop_time'])

    def _on_test(self, test, status, start_time, stop_time, tags, details):
        # tags/details are accepted to satisfy the callback signature but
        # have no CSV column.
        row = [test.id(), status, start_time, stop_time]
        self._write_row(row)
class CatFiles(StreamResult):
    """Cat file attachments received to a stream."""

    def __init__(self, byte_stream):
        # Wrap the raw stream so every write below is binary-safe.
        self.stream = subunit.make_stream_binary(byte_stream)

    def status(self, test_id=None, test_status=None, test_tags=None,
               runnable=True, file_name=None, file_bytes=None, eof=False,
               mime_type=None, route_code=None, timestamp=None):
        # Only file-attachment events carry a file_name; every other status
        # event is ignored.  Flush per chunk so output appears promptly.
        if file_name is not None:
            self.stream.write(file_bytes)
            self.stream.flush()
| agpl-3.0 |
j-griffith/cinder | cinder/tests/unit/monkey_patch_example/__init__.py | 57 | 1112 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Example Module for testing utils.monkey_patch()."""
# Records, in call order, the names passed to example_decorator for every
# invocation made through one of its wrappers.
CALLED_FUNCTION = []


def example_decorator(name, function):
    """decorator for notify which is used from utils.monkey_patch().

    :param name: name of the function
    :param function: - object of the function
    :returns: function -- decorated function
    """
    def wrapped_func(*args, **kwargs):
        # Log the call before delegating to the real implementation.
        CALLED_FUNCTION.append(name)
        result = function(*args, **kwargs)
        return result
    return wrapped_func
| apache-2.0 |
thnee/ansible | test/units/modules/network/fortios/test_fortios_system_ipv6_tunnel.py | 21 | 8491 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_ipv6_tunnel
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Autouse fixture: replaces the module's Connection class with a mock so
    # no real device connection is ever attempted during these tests.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_ipv6_tunnel.Connection')
    return connection_class_mock


# NOTE(review): this passes the fixture *function* (not a mock instance) to
# FortiOSHandler at import time.  The handler's set/delete/schema methods are
# patched in every test below, so the connection object itself is presumably
# never exercised -- confirm before relying on it.
fos_instance = FortiOSHandler(connection_mock)
def test_system_ipv6_tunnel_creation(mocker):
    """A 'present' run issues a set() and reports success as a change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST',
                      'http_status': 200})

    tunnel = {
        'destination': 'test_value_3',
        'interface': 'test_value_4',
        'name': 'default_name_5',
        'source': 'test_value_6',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_tunnel': dict(tunnel),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_system_ipv6_tunnel.fortios_system(
        input_data, fos_instance)

    set_method_mock.assert_called_with(
        'system', 'ipv6-tunnel', data=tunnel, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_ipv6_tunnel_creation_fails(mocker):
    """A failed set() (HTTP 500) is reported as an error with no change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST',
                      'http_status': 500})

    tunnel = {
        'destination': 'test_value_3',
        'interface': 'test_value_4',
        'name': 'default_name_5',
        'source': 'test_value_6',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_tunnel': dict(tunnel),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_system_ipv6_tunnel.fortios_system(
        input_data, fos_instance)

    set_method_mock.assert_called_with(
        'system', 'ipv6-tunnel', data=tunnel, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_ipv6_tunnel_removal(mocker):
    """An 'absent' run issues a delete() and reports success as a change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST',
                      'http_status': 200})

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_ipv6_tunnel': {
            'destination': 'test_value_3',
            'interface': 'test_value_4',
            'name': 'default_name_5',
            'source': 'test_value_6',
        },
        'vdom': 'root',
    }

    is_error, changed, response = fortios_system_ipv6_tunnel.fortios_system(
        input_data, fos_instance)

    delete_method_mock.assert_called_with(
        'system', 'ipv6-tunnel', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_ipv6_tunnel_deletion_fails(mocker):
    """A failed delete() (HTTP 500) is reported as an error with no change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'error', 'http_method': 'POST',
                      'http_status': 500})

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_ipv6_tunnel': {
            'destination': 'test_value_3',
            'interface': 'test_value_4',
            'name': 'default_name_5',
            'source': 'test_value_6',
        },
        'vdom': 'root',
    }

    is_error, changed, response = fortios_system_ipv6_tunnel.fortios_system(
        input_data, fos_instance)

    delete_method_mock.assert_called_with(
        'system', 'ipv6-tunnel', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_ipv6_tunnel_idempotent(mocker):
    """A 404 error from set() is treated as neither an error nor a change."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE',
                      'http_status': 404})

    tunnel = {
        'destination': 'test_value_3',
        'interface': 'test_value_4',
        'name': 'default_name_5',
        'source': 'test_value_6',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_tunnel': dict(tunnel),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_system_ipv6_tunnel.fortios_system(
        input_data, fos_instance)

    set_method_mock.assert_called_with(
        'system', 'ipv6-tunnel', data=tunnel, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_ipv6_tunnel_filter_foreign_attributes(mocker):
    """Attributes outside the module schema are stripped before set()."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST',
                      'http_status': 200})

    tunnel = {
        'destination': 'test_value_3',
        'interface': 'test_value_4',
        'name': 'default_name_5',
        'source': 'test_value_6',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        # The extra key below must never reach the API call.
        'system_ipv6_tunnel': dict(tunnel, random_attribute_not_valid='tag'),
        'vdom': 'root',
    }

    is_error, changed, response = fortios_system_ipv6_tunnel.fortios_system(
        input_data, fos_instance)

    set_method_mock.assert_called_with(
        'system', 'ipv6-tunnel', data=tunnel, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
django-nonrel/django-nonrel | django/utils/encoding.py | 92 | 7321 | import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """A UnicodeDecodeError that also remembers the offending object."""

    def __init__(self, obj, *args):
        # Keep the original input around so the message can show it.
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        base_message = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (
            base_message, self.obj, type(self.obj))
class StrAndUnicode(object):
    """
    A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
    Useful as a mix-in.
    """
    # NOTE(review): Python 2 idiom -- subclasses are expected to define
    # __unicode__(); on Python 3 a __str__ returning bytes would be invalid.
    def __str__(self):
        return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a unicode object representing 's'. Treats bytestrings using the
    'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, Promise):
        # The input is the result of a gettext_lazy() call.
        # Returned untouched, presumably so translation stays deferred
        # instead of being forced here.
        return s
    return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    # Python 2 only: ``long`` and ``types.NoneType`` do not exist on
    # Python 3.
    protected_types = (
        types.NoneType,
        int, long,
        datetime.datetime, datetime.date, datetime.time,
        float, Decimal,
        tuple, list, dict,
    )
    return isinstance(obj, protected_types)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # NOTE(review): Python 2 only -- relies on the unicode/basestring
    # builtins and the ``except ExcType, name`` syntax below.
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode. This function gets called often in that
    # setting.
    if isinstance(s, unicode):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, basestring,):
            if hasattr(s, '__unicode__'):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = ' '.join([force_unicode(arg, encoding, strings_only,
                            errors) for arg in s])
        elif not isinstance(s, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError, e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_unicode(arg, encoding, strings_only,
                    errors) for arg in s])
    return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # NOTE(review): Python 2 only -- relies on basestring/unicode builtins.
    if strings_only and isinstance(s, (types.NoneType, int)):
        return s
    if isinstance(s, Promise):
        # Lazy translations are evaluated and encoded here (unlike in
        # smart_unicode, which leaves them lazy).
        return unicode(s).encode(encoding, errors)
    elif not isinstance(s, basestring):
        try:
            return str(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg, encoding, strings_only,
                        errors) for arg in s])
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        # Re-encode a non-empty bytestring that is assumed to be UTF-8 into
        # the requested encoding.
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This is the algorithm from section 3.1 of RFC 3987. However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.

    Returns an ASCII string containing the encoded result.
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.quote already considers all but
    # the ~ safe.
    # The % character is also added to the list of safe characters here, as the
    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
    # converted.
    if iri is None:
        # Pass None through unchanged so callers need not pre-check.
        return iri
    # Python 2: urllib.quote; the input is byte-encoded first via smart_str.
    return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
    """Convert an file system path to a URI portion that is suitable for
    inclusion in a URL.

    We are assuming input is either UTF-8 or unicode already.

    This method will encode certain chars that would normally be recognized as
    special chars for URIs. Note that this method does not encode the '
    character, as it is a valid character within URIs. See
    encodeURIComponent() JavaScript function for more details.

    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        # Pass None through unchanged so callers need not pre-check.
        return path
    # I know about `os.sep` and `os.altsep` but I want to leave
    # some flexibility for hardcoding separators.
    return urllib.quote(smart_str(path).replace("\\", "/"), safe="/~!*()'")
# The encoding of the default system locale, falling back to 'ascii' if the
# encoding is unsupported by Python or could not be determined.
# See tickets #10335 and #5846.
try:
    DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
    codecs.lookup(DEFAULT_LOCALE_ENCODING)
except Exception:
    # locale.getdefaultlocale() can raise ValueError for malformed locale
    # environment settings and codecs.lookup() raises LookupError for an
    # unknown codec; fall back to plain ASCII in either case.  The previous
    # bare ``except:`` would also have swallowed KeyboardInterrupt and
    # SystemExit, which must propagate.
    DEFAULT_LOCALE_ENCODING = 'ascii'
| bsd-3-clause |
DavidLP/home-assistant | tests/components/homekit_controller/test_config_flow.py | 1 | 26318 | """Tests for homekit_controller config flow."""
import json
from unittest import mock
import homekit
import pytest
from homeassistant.components.homekit_controller import config_flow
from homeassistant.components.homekit_controller.const import KNOWN_DEVICES
from tests.common import MockConfigEntry
from tests.components.homekit_controller.common import (
Accessory, FakeService, setup_platform
)
# (exception raised during pairing, expected error key) pairs for failures
# that re-display the pairing form (see test_pair_form_errors below).
ERROR_MAPPING_FORM_FIXTURE = [
    (homekit.MaxPeersError, 'max_peers_error'),
    (homekit.BusyError, 'busy_error'),
    (homekit.MaxTriesError, 'max_tries_error'),
    (KeyError, 'pairing_failed'),
]

# Failures that abort the flow outright (see test_pair_abort_errors below).
ERROR_MAPPING_ABORT_FIXTURE = [
    (homekit.AccessoryNotFoundError, 'accessory_not_found_error'),
]
def _setup_flow_handler(hass):
    """Return a config-flow handler wired to *hass* with a fresh context."""
    handler = config_flow.HomekitControllerFlowHandler()
    handler.hass = hass
    handler.context = {}
    return handler
async def test_discovery_works(hass):
    """Test a device being discovered."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            # sf=1 advertises the accessory as unpaired/discoverable.
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    # Minimal accessory database: the flow reads the value of characteristic
    # type 23 on service type 3e as the device name (asserted as the entry
    # title below).
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    controller = mock.Mock()
    controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_discovery_works_upper_case(hass):
    """Test discovery when the zeroconf properties are upper-cased."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        # Same shape as test_discovery_works but with upper-case property
        # keys, which the flow must handle equivalently.
        'properties': {
            'MD': 'TestDevice',
            'ID': '00:00:00:00:00:00',
            'C#': 1,
            'SF': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    controller = mock.Mock()
    controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_discovery_works_missing_csharp(hass):
    """Test a device being discovered that has missing mdns attrs."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        # 'c#' (config number) is deliberately absent; the flow must cope.
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]
    controller = mock.Mock()
    controller.pairings = {
        '00:00:00:00:00:00': pairing,
    }
    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_pair_already_paired_1(hass):
    """Discovery of an already-paired device (sf=0) aborts the flow."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            # sf=0 means the accessory is already paired with someone.
            'sf': 0,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_paired'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
async def test_discovery_ignored_model(hass):
    """Discovery of a model on the ignore list aborts the flow."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            # BSB002 is on the integration's ignore list -- presumably a
            # bridge handled by another integration; confirm in const.py.
            'md': 'BSB002',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'ignored_model'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
async def test_discovery_invalid_config_entry(hass):
    """There is already a config entry for the pairing id but it's invalid."""
    MockConfigEntry(domain='homekit_controller', data={
        'AccessoryPairingID': '00:00:00:00:00:00'
    }).add_to_hass(hass)

    # We just added a mock config entry so it must be visible in hass
    assert len(hass.config_entries.async_entries()) == 1

    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            # sf=1: the device says it is unpaired, so the stored entry
            # cannot be valid any more.
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    # Discovery of a HKID that is in a pairable state but for which there is
    # already a config entry - in that case the stale config entry is
    # automatically removed.
    config_entry_count = len(hass.config_entries.async_entries())
    assert config_entry_count == 0
async def test_discovery_already_configured(hass):
    """Discovery of an already-configured device aborts without a refresh."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            # c#=1 matches the stored connection's config_num below, so no
            # config-changed handling should fire.
            'c#': 1,
            'sf': 0,
        }
    }
    await setup_platform(hass)

    conn = mock.Mock()
    conn.config_num = 1
    hass.data[KNOWN_DEVICES]['00:00:00:00:00:00'] = conn

    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
    assert conn.async_config_num_changed.call_count == 0
async def test_discovery_already_configured_config_change(hass):
    """A config-number bump on a configured device triggers a refresh."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            # c#=2 is newer than the stored config_num of 1 below.
            'c#': 2,
            'sf': 0,
        }
    }
    await setup_platform(hass)

    conn = mock.Mock()
    conn.config_num = 1
    hass.data[KNOWN_DEVICES]['00:00:00:00:00:00'] = conn

    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
    # The entity map must be refreshed with the new config number.
    assert conn.async_refresh_entity_map.call_args == mock.call(2)
async def test_pair_unable_to_pair(hass):
    """Pairing completed without exception, but didn't create a pairing."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    # perform_pairing succeeds silently but leaves controller.pairings empty,
    # which the flow must surface as an 'unable_to_pair' form error.
    controller = mock.Mock()
    controller.pairings = {}

    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == 'unable_to_pair'
@pytest.mark.parametrize("exception,expected", ERROR_MAPPING_ABORT_FIXTURE)
async def test_pair_abort_errors(hass, exception, expected):
    """Pairing exceptions in the abort mapping abort the whole flow."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    controller = mock.Mock()
    controller.pairings = {}

    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        # Raise the parametrized exception from perform_pairing and check
        # the flow aborts with the mapped reason.
        controller.perform_pairing.side_effect = exception('error')
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'abort'
    assert result['reason'] == expected
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
@pytest.mark.parametrize("exception,expected", ERROR_MAPPING_FORM_FIXTURE)
async def test_pair_form_errors(hass, exception, expected):
    """Pairing exceptions in the form mapping re-display the pairing form."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    controller = mock.Mock()
    controller.pairings = {}

    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        # Raise the parametrized exception from perform_pairing and check
        # the form is shown again with the mapped error key.
        controller.perform_pairing.side_effect = exception('error')
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == expected
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
async def test_pair_authentication_error(hass):
    """Pairing code is incorrect."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    controller = mock.Mock()
    controller.pairings = {}

    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        # A wrong code raises AuthenticationError; the form is re-shown.
        exc = homekit.AuthenticationError('Invalid pairing code')
        controller.perform_pairing.side_effect = exc
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == 'authentication_error'
async def test_pair_unknown_error(hass):
    """Pairing failed for an unknown reason."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    controller = mock.Mock()
    controller.pairings = {}

    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        # UnknownError maps onto the generic 'unknown_error' form error.
        exc = homekit.UnknownError('Unknown error')
        controller.perform_pairing.side_effect = exc
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'form'
    assert result['errors']['pairing_code'] == 'unknown_error'
async def test_pair_already_paired(hass):
    """Device is already paired."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }
    flow = _setup_flow_handler(hass)
    result = await flow.async_step_discovery(discovery_info)
    assert result['type'] == 'form'
    assert result['step_id'] == 'pair'
    assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}

    controller = mock.Mock()
    controller.pairings = {}

    with mock.patch('homekit.Controller') as controller_cls:
        controller_cls.return_value = controller
        # UnavailableError during pairing means the accessory already has a
        # controller; the flow aborts as 'already_paired'.
        exc = homekit.UnavailableError('Unavailable error')
        controller.perform_pairing.side_effect = exc
        result = await flow.async_step_pair({
            'pairing_code': '111-22-33',
        })
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_paired'
async def test_import_works(hass):
    """Test importing a legacy pairing for a discovered device."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }

    # The legacy pairing data to import (as stored by earlier versions).
    import_info = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }

    pairing = mock.Mock(pairing_data={
        'AccessoryPairingID': '00:00:00:00:00:00',
    })
    pairing.list_accessories_and_characteristics.return_value = [{
        "aid": 1,
        "services": [{
            "characteristics": [{
                "type": "23",
                "value": "Koogeek-LS1-20833F"
            }],
            "type": "3e",
        }]
    }]

    flow = _setup_flow_handler(hass)

    # The import path builds an IpPairing directly instead of pairing anew.
    pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
    with mock.patch(pairing_cls_imp) as pairing_cls:
        pairing_cls.return_value = pairing
        result = await flow.async_import_legacy_pairing(
            discovery_info['properties'], import_info)

    assert result['type'] == 'create_entry'
    assert result['title'] == 'Koogeek-LS1-20833F'
    assert result['data'] == pairing.pairing_data
async def test_import_already_configured(hass):
    """Test importing a device from .homekit that is already a ConfigEntry."""
    discovery_info = {
        'name': 'TestDevice',
        'host': '127.0.0.1',
        'port': 8080,
        'properties': {
            'md': 'TestDevice',
            'id': '00:00:00:00:00:00',
            'c#': 1,
            'sf': 1,
        }
    }

    import_info = {
        'AccessoryPairingID': '00:00:00:00:00:00',
    }

    # A config entry with the same pairing id already exists, so the import
    # must abort rather than create a duplicate.
    config_entry = MockConfigEntry(
        domain='homekit_controller',
        data=import_info,
    )
    config_entry.add_to_hass(hass)

    flow = _setup_flow_handler(hass)
    result = await flow.async_import_legacy_pairing(
        discovery_info['properties'], import_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
async def test_user_works(hass):
"""Test user initiated disovers devices."""
discovery_info = {
'name': 'TestDevice',
'host': '127.0.0.1',
'port': 8080,
'properties': {
'md': 'TestDevice',
'id': '00:00:00:00:00:00',
'c#': 1,
'sf': 1,
}
}
pairing = mock.Mock(pairing_data={
'AccessoryPairingID': '00:00:00:00:00:00',
})
pairing.list_accessories_and_characteristics.return_value = [{
"aid": 1,
"services": [{
"characteristics": [{
"type": "23",
"value": "Koogeek-LS1-20833F"
}],
"type": "3e",
}]
}]
controller = mock.Mock()
controller.pairings = {
'00:00:00:00:00:00': pairing,
}
controller.discover.return_value = [
discovery_info,
]
flow = _setup_flow_handler(hass)
with mock.patch('homekit.Controller') as controller_cls:
controller_cls.return_value = controller
result = await flow.async_step_user()
assert result['type'] == 'form'
assert result['step_id'] == 'user'
result = await flow.async_step_user({
'device': '00:00:00:00:00:00',
})
assert result['type'] == 'form'
assert result['step_id'] == 'pair'
with mock.patch('homekit.Controller') as controller_cls:
controller_cls.return_value = controller
result = await flow.async_step_pair({
'pairing_code': '111-22-33',
})
assert result['type'] == 'create_entry'
assert result['title'] == 'Koogeek-LS1-20833F'
assert result['data'] == pairing.pairing_data
async def test_user_no_devices(hass):
"""Test user initiated pairing where no devices discovered."""
flow = _setup_flow_handler(hass)
with mock.patch('homekit.Controller') as controller_cls:
controller_cls.return_value.discover.return_value = []
result = await flow.async_step_user()
assert result['type'] == 'abort'
assert result['reason'] == 'no_devices'
async def test_user_no_unpaired_devices(hass):
"""Test user initiated pairing where no unpaired devices discovered."""
flow = _setup_flow_handler(hass)
discovery_info = {
'name': 'TestDevice',
'host': '127.0.0.1',
'port': 8080,
'properties': {
'md': 'TestDevice',
'id': '00:00:00:00:00:00',
'c#': 1,
'sf': 0,
}
}
with mock.patch('homekit.Controller') as controller_cls:
controller_cls.return_value.discover.return_value = [
discovery_info,
]
result = await flow.async_step_user()
assert result['type'] == 'abort'
assert result['reason'] == 'no_devices'
async def test_parse_new_homekit_json(hass):
"""Test migrating recent .homekit/pairings.json files."""
service = FakeService('public.hap.service.lightbulb')
on_char = service.add_characteristic('on')
on_char.value = 1
accessory = Accessory('TestDevice', 'example.com', 'Test', '0001', '0.1')
accessory.services.append(service)
fake_controller = await setup_platform(hass)
pairing = fake_controller.add([accessory])
pairing.pairing_data = {
'AccessoryPairingID': '00:00:00:00:00:00',
}
mock_path = mock.Mock()
mock_path.exists.side_effect = [True, False]
read_data = {
'00:00:00:00:00:00': pairing.pairing_data,
}
mock_open = mock.mock_open(read_data=json.dumps(read_data))
discovery_info = {
'name': 'TestDevice',
'host': '127.0.0.1',
'port': 8080,
'properties': {
'md': 'TestDevice',
'id': '00:00:00:00:00:00',
'c#': 1,
'sf': 0,
}
}
flow = _setup_flow_handler(hass)
pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
with mock.patch('builtins.open', mock_open):
with mock.patch('os.path', mock_path):
result = await flow.async_step_discovery(discovery_info)
assert result['type'] == 'create_entry'
assert result['title'] == 'TestDevice'
assert result['data']['AccessoryPairingID'] == '00:00:00:00:00:00'
assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
async def test_parse_old_homekit_json(hass):
"""Test migrating original .homekit/hk-00:00:00:00:00:00 files."""
service = FakeService('public.hap.service.lightbulb')
on_char = service.add_characteristic('on')
on_char.value = 1
accessory = Accessory('TestDevice', 'example.com', 'Test', '0001', '0.1')
accessory.services.append(service)
fake_controller = await setup_platform(hass)
pairing = fake_controller.add([accessory])
pairing.pairing_data = {
'AccessoryPairingID': '00:00:00:00:00:00',
}
mock_path = mock.Mock()
mock_path.exists.side_effect = [False, True]
mock_listdir = mock.Mock()
mock_listdir.return_value = [
'hk-00:00:00:00:00:00',
'pairings.json'
]
read_data = {
'AccessoryPairingID': '00:00:00:00:00:00',
}
mock_open = mock.mock_open(read_data=json.dumps(read_data))
discovery_info = {
'name': 'TestDevice',
'host': '127.0.0.1',
'port': 8080,
'properties': {
'md': 'TestDevice',
'id': '00:00:00:00:00:00',
'c#': 1,
'sf': 0,
}
}
flow = _setup_flow_handler(hass)
pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
with mock.patch('builtins.open', mock_open):
with mock.patch('os.path', mock_path):
with mock.patch('os.listdir', mock_listdir):
result = await flow.async_step_discovery(discovery_info)
assert result['type'] == 'create_entry'
assert result['title'] == 'TestDevice'
assert result['data']['AccessoryPairingID'] == '00:00:00:00:00:00'
assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
async def test_parse_overlapping_homekit_json(hass):
"""Test migrating .homekit/pairings.json files when hk- exists too."""
service = FakeService('public.hap.service.lightbulb')
on_char = service.add_characteristic('on')
on_char.value = 1
accessory = Accessory('TestDevice', 'example.com', 'Test', '0001', '0.1')
accessory.services.append(service)
fake_controller = await setup_platform(hass)
pairing = fake_controller.add([accessory])
pairing.pairing_data = {
'AccessoryPairingID': '00:00:00:00:00:00',
}
mock_listdir = mock.Mock()
mock_listdir.return_value = [
'hk-00:00:00:00:00:00',
'pairings.json'
]
mock_path = mock.Mock()
mock_path.exists.side_effect = [True, True]
# First file to get loaded is .homekit/pairing.json
read_data_1 = {
'00:00:00:00:00:00': {
'AccessoryPairingID': '00:00:00:00:00:00',
}
}
mock_open_1 = mock.mock_open(read_data=json.dumps(read_data_1))
# Second file to get loaded is .homekit/hk-00:00:00:00:00:00
read_data_2 = {
'AccessoryPairingID': '00:00:00:00:00:00',
}
mock_open_2 = mock.mock_open(read_data=json.dumps(read_data_2))
side_effects = [mock_open_1.return_value, mock_open_2.return_value]
discovery_info = {
'name': 'TestDevice',
'host': '127.0.0.1',
'port': 8080,
'properties': {
'md': 'TestDevice',
'id': '00:00:00:00:00:00',
'c#': 1,
'sf': 0,
}
}
flow = _setup_flow_handler(hass)
pairing_cls_imp = "homekit.controller.ip_implementation.IpPairing"
with mock.patch(pairing_cls_imp) as pairing_cls:
pairing_cls.return_value = pairing
with mock.patch('builtins.open', side_effect=side_effects):
with mock.patch('os.path', mock_path):
with mock.patch('os.listdir', mock_listdir):
result = await flow.async_step_discovery(discovery_info)
await hass.async_block_till_done()
assert result['type'] == 'create_entry'
assert result['title'] == 'TestDevice'
assert result['data']['AccessoryPairingID'] == '00:00:00:00:00:00'
assert flow.context == {'title_placeholders': {'name': 'TestDevice'}}
| apache-2.0 |
drpeteb/scipy | scipy/interpolate/tests/test_interpolate.py | 43 | 67695 | from __future__ import division, print_function, absolute_import
import itertools
import warnings
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_array_equal,
dec, TestCase, run_module_suite, assert_allclose)
from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._version import NumpyVersion
from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
ppform, splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
RectBivariateSpline, interpn)
from scipy.interpolate import _ppoly
from scipy._lib._gcutils import assert_deallocated
class TestInterp2D(TestCase):
def test_interp2d(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x+0.5*y)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
v,u = ogrid[0:2:24j, 0:pi:25j]
assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
def test_interp2d_meshgrid_input(self):
# Ticket #703
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
def test_interp2d_meshgrid_input_unsorted(self):
np.random.seed(1234)
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
z = sin(x[None,:] + y[:,None]/2.)
ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
np.random.shuffle(y)
z = sin(x[None,:] + y[:,None]/2.)
ip3 = interp2d(x, y, z, kind='cubic')
x = linspace(0, 2, 31)
y = linspace(0, pi, 30)
assert_equal(ip1(x, y), ip2(x, y))
assert_equal(ip1(x, y), ip3(x, y))
def test_interp2d_eval_unsorted(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x + 0.5*y)
func = interp2d(x, y, z)
xe = np.array([3, 4, 5])
ye = np.array([5.3, 7.1])
assert_allclose(func(xe, ye), func(xe, ye[::-1]))
assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
def test_interp2d_linear(self):
# Ticket #898
a = np.zeros([5, 5])
a[2, 2] = 1.0
x = y = np.arange(5)
b = interp2d(x, y, a, 'linear')
assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
def test_interp2d_bounds(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 2, 7)
z = x[None, :]**2 + y[:, None]
ix = np.linspace(-1, 3, 31)
iy = np.linspace(-1, 3, 33)
b = interp2d(x, y, z, bounds_error=True)
assert_raises(ValueError, b, ix, iy)
b = interp2d(x, y, z, fill_value=np.nan)
iz = b(ix, iy)
mx = (ix < 0) | (ix > 1)
my = (iy < 0) | (iy > 2)
assert_(np.isnan(iz[my,:]).all())
assert_(np.isnan(iz[:,mx]).all())
assert_(np.isfinite(iz[~my,:][:,~mx]).all())
class TestInterp1D(object):
def setUp(self):
self.x10 = np.arange(10.)
self.y10 = np.arange(10.)
self.x25 = self.x10.reshape((2,5))
self.x2 = np.arange(2.)
self.y2 = np.arange(2.)
self.x1 = np.array([0.])
self.y1 = np.array([0.])
self.y210 = np.arange(20.).reshape((2, 10))
self.y102 = np.arange(20.).reshape((10, 2))
self.fill_value = -100.0
def test_validation(self):
# Make sure that appropriate exceptions are raised when invalid values
# are given to the constructor.
# These should all work.
interp1d(self.x10, self.y10, kind='linear')
interp1d(self.x10, self.y10, kind='cubic')
interp1d(self.x10, self.y10, kind='slinear')
interp1d(self.x10, self.y10, kind='quadratic')
interp1d(self.x10, self.y10, kind='zero')
interp1d(self.x10, self.y10, kind='nearest')
interp1d(self.x10, self.y10, kind=0)
interp1d(self.x10, self.y10, kind=1)
interp1d(self.x10, self.y10, kind=2)
interp1d(self.x10, self.y10, kind=3)
# x array must be 1D.
assert_raises(ValueError, interp1d, self.x25, self.y10)
# y array cannot be a scalar.
assert_raises(ValueError, interp1d, self.x10, np.array(0))
# Check for x and y arrays having the same length.
assert_raises(ValueError, interp1d, self.x10, self.y2)
assert_raises(ValueError, interp1d, self.x2, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y102)
interp1d(self.x10, self.y210)
interp1d(self.x10, self.y102, axis=0)
# Check for x and y having at least 1 element.
assert_raises(ValueError, interp1d, self.x1, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y1)
assert_raises(ValueError, interp1d, self.x1, self.y1)
def test_init(self):
# Check that the attributes are initialized appropriately by the
# constructor.
assert_(interp1d(self.x10, self.y10).copy)
assert_(not interp1d(self.x10, self.y10, copy=False).copy)
assert_(interp1d(self.x10, self.y10).bounds_error)
assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
3.0)
assert_equal(interp1d(self.x10, self.y10).axis, 0)
assert_equal(interp1d(self.x10, self.y210).axis, 1)
assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
def test_assume_sorted(self):
# Check for unsorted arrays
interp10 = interp1d(self.x10, self.y10)
interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
interp10([2.4, 5.6, 6.0]))
# Check assume_sorted keyword (defaults to False)
interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=False)
assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=True)
# Should raise an error for unsorted input if assume_sorted=True
assert_raises(ValueError, interp10_assume_kw2, self.x10)
# Check that if y is a 2-D array, things are still consistent
interp10_y_2d = interp1d(self.x10, self.y210)
interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
assert_array_almost_equal(interp10_y_2d(self.x10),
interp10_y_2d_unsorted(self.x10))
def test_linear(self):
# Check the actual implementation of linear interpolation.
interp10 = interp1d(self.x10, self.y10)
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]))
def test_cubic(self):
# Check the actual implementation of spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='cubic')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]),)
def test_nearest(self):
# Check the actual implementation of nearest-neighbour interpolation.
interp10 = interp1d(self.x10, self.y10, kind='nearest')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 6., 6.]),)
@dec.knownfailureif(True, "zero-order splines fail for the last point")
def test_zero(self):
# Check the actual implementation of zero-order spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='zero')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 6., 6.]))
def _bounds_check(self, kind='linear'):
# Test that our handling of out-of-bounds input is correct.
extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
bounds_error=False, kind=kind)
assert_array_equal(extrap10(11.2), np.array(self.fill_value))
assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
np.array(self.fill_value),)
assert_array_equal(extrap10._check_bounds(
np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
np.array([True, False, False, False, True]))
raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
kind=kind)
assert_raises(ValueError, raises_bounds_error, -1.0)
assert_raises(ValueError, raises_bounds_error, 11.0)
raises_bounds_error([0.0, 5.0, 9.0])
def _bounds_check_int_nan_fill(self, kind='linear'):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
yi = c(x - 1)
assert_(np.isnan(yi[0]))
assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
def test_bounds(self):
for kind in ('linear', 'cubic', 'nearest',
'slinear', 'zero', 'quadratic'):
self._bounds_check(kind)
self._bounds_check_int_nan_fill(kind)
def _nd_check_interp(self, kind='linear'):
# Check the behavior when the inputs and outputs are multidimensional.
# Multidimensional input.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
np.array([[3., 5.], [2., 7.]]))
# Scalar input -> 0-dim scalar array output
assert_(isinstance(interp10(1.2), np.ndarray))
assert_equal(interp10(1.2).shape, ())
# Multidimensional outputs.
interp210 = interp1d(self.x10, self.y210, kind=kind)
assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
assert_array_almost_equal(interp210(np.array([1., 2.])),
np.array([[1., 2.], [11., 12.]]))
interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
assert_array_almost_equal(interp102(np.array([1., 3.])),
np.array([[2., 3.], [6., 7.]]))
# Both at the same time!
x_new = np.array([[3., 5.], [2., 7.]])
assert_array_almost_equal(interp210(x_new),
np.array([[[3., 5.], [2., 7.]],
[[13., 15.], [12., 17.]]]))
assert_array_almost_equal(interp102(x_new),
np.array([[[6., 7.], [10., 11.]],
[[4., 5.], [14., 15.]]]))
def _nd_check_shape(self, kind='linear'):
# Check large ndim output shape
a = [4, 5, 6, 7]
y = np.arange(np.prod(a)).reshape(*a)
for n, s in enumerate(a):
x = np.arange(s)
z = interp1d(x, y, axis=n, kind=kind)
assert_array_almost_equal(z(x), y, err_msg=kind)
x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
b = list(a)
b[n:n+1] = [2,3,1]
assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
def test_nd(self):
for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest'):
self._nd_check_interp(kind)
self._nd_check_shape(kind)
def _check_complex(self, dtype=np.complex_, kind='linear'):
x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
y = x * x ** (1 + 2j)
y = y.astype(dtype)
# simple test
c = interp1d(x, y, kind=kind)
assert_array_almost_equal(y[:-1], c(x)[:-1])
# check against interpolating real+imag separately
xi = np.linspace(1, 10, 31)
cr = interp1d(x, y.real, kind=kind)
ci = interp1d(x, y.imag, kind=kind)
assert_array_almost_equal(c(xi).real, cr(xi))
assert_array_almost_equal(c(xi).imag, ci(xi))
def test_complex(self):
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero'):
self._check_complex(np.complex64, kind)
self._check_complex(np.complex128, kind)
@dec.knownfailureif(True, "zero-order splines fail for the last point")
def test_nd_zero_spline(self):
# zero-order splines don't get the last point right,
# see test_zero above
#yield self._nd_check_interp, 'zero'
#yield self._nd_check_interp, 'zero'
pass
def test_circular_refs(self):
# Test interp1d can be automatically garbage collected
x = np.linspace(0, 1)
y = np.linspace(0, 1)
# Confirm interp can be released from memory after use
with assert_deallocated(interp1d, x, y) as interp:
new_y = interp([0.1, 0.2])
del interp
class TestLagrange(TestCase):
def test_lagrange(self):
p = poly1d([5,2,1,4,3])
xs = np.arange(len(p.coeffs))
ys = p(xs)
pl = lagrange(xs,ys)
assert_array_almost_equal(p.coeffs,pl.coeffs)
class TestAkima1DInterpolator(TestCase):
def test_eval(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344, 5.9803623910336236590978842,
5.5067291516462386624652936, 5.2031367459745245795943447,
4.1796554159017080820603951, 3.4110386597938129327189927,
3.])
assert_allclose(ak(xi), yi)
def test_eval_2d(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.column_stack((y, 2. * y))
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi = np.column_stack((yi, 2. * yi))
assert_allclose(ak(xi), yi)
def test_eval_3d(self):
x = np.arange(0., 11.)
y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.empty((11, 2, 2))
y[:, 0, 0] = y_
y[:, 1, 0] = 2. * y_
y[:, 0, 1] = 3. * y_
y[:, 1, 1] = 4. * y_
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.empty((13, 2, 2))
yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi[:, 0, 0] = yi_
yi[:, 1, 0] = 2. * yi_
yi[:, 0, 1] = 3. * yi_
yi[:, 1, 1] = 4. * yi_
assert_allclose(ak(xi), yi)
def test_extend(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
try:
ak.extend()
except NotImplementedError as e:
if str(e) != ("Extending a 1D Akima interpolator is not "
"yet implemented"):
raise
except:
raise
class TestPPolyCommon(TestCase):
# test basic functionality for PPoly and BPoly
def test_sort_check(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 1, 0.5])
assert_raises(ValueError, PPoly, c, x)
assert_raises(ValueError, BPoly, c, x)
def test_extend(self):
# Test adding new points to the piecewise polynomial
np.random.seed(1234)
order = 3
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
for cls in (PPoly, BPoly):
pp = cls(c[:,:9], x[:10])
pp.extend(c[:,9:], x[10:])
pp2 = cls(c[:,10:], x[10:])
pp2.extend(c[:,:10], x[:10], right=False)
pp3 = cls(c, x)
assert_array_equal(pp.c, pp3.c)
assert_array_equal(pp.x, pp3.x)
assert_array_equal(pp2.c, pp3.c)
assert_array_equal(pp2.x, pp3.x)
def test_extend_diff_orders(self):
# Test extending polynomial with different order one
np.random.seed(1234)
x = np.linspace(0, 1, 6)
c = np.random.rand(2, 5)
x2 = np.linspace(1, 2, 6)
c2 = np.random.rand(4, 5)
for cls in (PPoly, BPoly):
pp1 = cls(c, x)
pp2 = cls(c2, x2)
pp_comb = cls(c, x)
pp_comb.extend(c2, x2[1:])
# NB. doesn't match to pp1 at the endpoint, because pp1 is not
# continuous with pp2 as we took random coefs.
xi1 = np.linspace(0, 1, 300, endpoint=False)
xi2 = np.linspace(1, 2, 300)
assert_allclose(pp1(xi1), pp_comb(xi1))
assert_allclose(pp2(xi2), pp_comb(xi2))
def test_shape(self):
np.random.seed(1234)
c = np.random.rand(8, 12, 5, 6, 7)
x = np.sort(np.random.rand(13))
xp = np.random.rand(3, 4)
for cls in (PPoly, BPoly):
p = cls(c, x)
assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
# 'scalars'
for cls in (PPoly, BPoly):
p = cls(c[..., 0, 0, 0], x)
assert_equal(np.shape(p(0.5)), ())
assert_equal(np.shape(p(np.array(0.5))), ())
if NumpyVersion(np.__version__) >= '1.7.0':
# can't use dtype=object (with any numpy; what fails is
# constructing the object array here for old numpy)
assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]]))
def test_complex_coef(self):
np.random.seed(12345)
x = np.sort(np.random.random(13))
c = np.random.random((8, 12)) * (1. + 0.3j)
c_re, c_im = c.real, c.imag
xp = np.random.random(5)
for cls in (PPoly, BPoly):
p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
for nu in [0, 1, 2]:
assert_allclose(p(xp, nu).real, p_re(xp, nu))
assert_allclose(p(xp, nu).imag, p_im(xp, nu))
def test_axis(self):
np.random.seed(12345)
c = np.random.rand(3, 4, 5, 6, 7, 8)
c_s = c.shape
xp = np.random.random((1, 2))
for axis in (0, 1, 2, 3):
k, m = c.shape[axis], c.shape[axis+1]
x = np.sort(np.random.rand(m+1))
for cls in (PPoly, BPoly):
p = cls(c, x, axis=axis)
assert_equal(p.c.shape,
c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
res = p(xp)
targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
assert_equal(res.shape, targ_shape)
# deriv/antideriv does not drop the axis
for p1 in [cls(c, x, axis=axis).derivative(),
cls(c, x, axis=axis).derivative(2),
cls(c, x, axis=axis).antiderivative(),
cls(c, x, axis=axis).antiderivative(2)]:
assert_equal(p1.axis, p.axis)
# c array needs two axes for the coefficients and intervals, so
# 0 <= axis < c.ndim-1; raise otherwise
for axis in (-1, 4, 5, 6):
for cls in (BPoly, PPoly):
assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
class TestPolySubclassing(TestCase):
class P(PPoly):
pass
class B(BPoly):
pass
def _make_polynomials(self):
np.random.seed(1234)
x = np.sort(np.random.random(3))
c = np.random.random((4, 2))
return self.P(c, x), self.B(c, x)
def test_derivative(self):
pp, bp = self._make_polynomials()
for p in (pp, bp):
pd = p.derivative()
assert_equal(p.__class__, pd.__class__)
ppa = pp.antiderivative()
assert_equal(pp.__class__, ppa.__class__)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = self.P.from_spline(spl)
assert_equal(pp.__class__, self.P)
def test_conversions(self):
pp, bp = self._make_polynomials()
pp1 = self.P.from_bernstein_basis(bp)
assert_equal(pp1.__class__, self.P)
bp1 = self.B.from_power_basis(pp)
assert_equal(bp1.__class__, self.B)
def test_from_derivatives(self):
x = [0, 1, 2]
y = [[1], [2], [3]]
bp = self.B.from_derivatives(x, y)
assert_equal(bp.__class__, self.B)
class TestPPoly(TestCase):
def test_simple(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5,6)).shape,
(5,6) + c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
ip = p.antiderivative()
assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
def test_construct_fast(self):
np.random.seed(1234)
c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
x = np.array([0, 0.5, 1])
p = PPoly.construct_fast(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_vs_alternative_implementations(self):
np.random.seed(1234)
c = np.random.rand(3, 12, 22)
x = np.sort(np.r_[0, np.random.rand(11), 1])
p = PPoly(c, x)
xp = np.r_[0.3, 0.5, 0.33, 0.6]
expected = _ppoly_eval_1(c, x, xp)
assert_allclose(p(xp), expected)
expected = _ppoly_eval_2(c[:,:,0], x, xp)
assert_allclose(p(xp)[:,0], expected)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
assert_allclose(pp(xi), splev(xi, spl))
def test_derivative_simple(self):
np.random.seed(1234)
c = np.array([[4, 3, 2, 1]]).T
dc = np.array([[3*4, 2*3, 2]]).T
ddc = np.array([[2*3*4, 1*2*3]]).T
x = np.array([0, 1])
pp = PPoly(c, x)
dpp = PPoly(dc, x)
ddpp = PPoly(ddc, x)
assert_allclose(pp.derivative().c, dpp.c)
assert_allclose(pp.derivative(2).c, ddpp.c)
def test_derivative_eval(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 3):
assert_allclose(pp(xi, dx), splev(xi, spl, dx))
def test_derivative(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 10):
assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
err_msg="dx=%d" % (dx,))
def test_antiderivative_of_constant(self):
# https://github.com/scipy/scipy/issues/4216
p = PPoly([[1.]], [0, 1])
assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
def test_antiderivative_regression_4355(self):
# https://github.com/scipy/scipy/issues/4355
p = PPoly([[1., 0.5]], [0, 1, 2])
q = p.antiderivative()
assert_equal(q.c, [[1, 0.5], [0, 1]])
assert_equal(q.x, [0, 1, 2])
assert_allclose(p.integrate(0, 2), 1.5)
assert_allclose(q(2) - q(0), 1.5)
def test_antiderivative_simple(self):
np.random.seed(1234)
# [ p1(x) = 3*x**2 + 2*x + 1,
# p2(x) = 1.6875]
c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
# [ pp1(x) = x**3 + x**2 + x,
# pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
# [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
# ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
iic = np.array([[1/4, 1/3, 1/2, 0, 0],
[0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
x = np.array([0, 0.25, 1])
pp = PPoly(c, x)
ipp = pp.antiderivative()
iipp = pp.antiderivative(2)
iipp2 = ipp.antiderivative()
assert_allclose(ipp.x, x)
assert_allclose(ipp.c.T, ic.T)
assert_allclose(iipp.c.T, iic.T)
assert_allclose(iipp2.c.T, iic.T)
def test_antiderivative_vs_derivative(self):
np.random.seed(1234)
x = np.linspace(0, 1, 30)**2
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
ipp = pp.antiderivative(dx)
# check that derivative is inverse op
pp2 = ipp.derivative(dx)
assert_allclose(pp.c, pp2.c)
# check continuity
for k in range(dx):
pp2 = ipp.derivative(k)
r = 1e-13
endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
def test_antiderivative_vs_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
pp2 = pp.antiderivative(dx)
spl2 = splantider(spl, dx)
xi = np.linspace(0, 1, 200)
assert_allclose(pp2(xi), splev(xi, spl2),
rtol=1e-7)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
a, b = 0.3, 0.9
ig = pp.integrate(a, b)
ipp = pp.antiderivative()
assert_allclose(ig, ipp(b) - ipp(a))
assert_allclose(ig, splint(a, b, spl))
a, b = -0.3, 0.9
ig = pp.integrate(a, b, extrapolate=True)
assert_allclose(ig, ipp(b) - ipp(a))
assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
def test_roots(self):
x = np.linspace(0, 1, 31)**2
y = np.sin(30*x)
spl = splrep(x, y, s=0, k=3)
pp = PPoly.from_spline(spl)
r = pp.roots()
r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
assert_allclose(r, sproot(spl), atol=1e-15)
def test_roots_idzero(self):
# Roots for piecewise polynomials with identically zero
# sections.
c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
x = np.array([0, 0.4, 0.6, 1.0])
pp = PPoly(c, x)
assert_array_equal(pp.roots(),
[0.25, 0.4, np.nan, 0.6 + 0.25])
def test_roots_repeated(self):
# Check roots repeated in multiple sections are reported only
# once.
# [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
c = np.array([[1, 0, -1], [-1, 0, 0]]).T
x = np.array([-1, 0, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [-2, 0])
assert_array_equal(pp.roots(extrapolate=False), [0])
def test_roots_discont(self):
# Check that a discontinuity across zero is reported as root
c = np.array([[1], [-1]]).T
x = np.array([0, 0.5, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [0.5])
assert_array_equal(pp.roots(discontinuity=False), [])
def test_roots_random(self):
# Check high-order polynomials with random coefficients
np.random.seed(1234)
num = 0
for extrapolate in (True, False):
for order in range(0, 20):
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
pp = PPoly(c, x)
r = pp.roots(discontinuity=False, extrapolate=extrapolate)
for i in range(2):
for j in range(3):
rr = r[i,j]
if rr.size > 0:
# Check that the reported roots indeed are roots
num += rr.size
val = pp(rr, extrapolate=extrapolate)[:,i,j]
cmpval = pp(rr, nu=1, extrapolate=extrapolate)[:,i,j]
assert_allclose(val/cmpval, 0, atol=1e-7,
err_msg="(%r) r = %s" % (extrapolate,
repr(rr),))
# Check that we checked a number of roots
assert_(num > 100, repr(num))
    def test_roots_croots(self):
        # Test the complex root finding algorithm (private helper
        # _ppoly._croots_poly1): residuals of the returned roots,
        # normalized by the term magnitudes, must vanish.
        np.random.seed(1234)

        for k in range(1, 15):
            c = np.random.rand(k, 1, 130)

            if k == 3:
                # add a case with zero discriminant
                c[:,0,0] = 1, 2, 1

            # w receives the complex roots, one per polynomial.
            w = np.empty(c.shape, dtype=complex)
            _ppoly._croots_poly1(c, w)

            if k == 1:
                # degree-0 polynomials have no roots; all outputs are nan
                assert_(np.isnan(w).all())
                continue

            # Evaluate each polynomial at its reported roots and normalize
            # by the sum of term magnitudes (relative residual).
            res = 0
            cres = 0
            for i in range(k):
                res += c[i,None] * w**(k-1-i)
                cres += abs(c[i,None] * w**(k-1-i))
            with np.errstate(invalid='ignore'):
                res /= cres
            res = res.ravel()
            res = res[~np.isnan(res)]
            assert_allclose(res, 0, atol=1e-10)
    def test_extrapolate_attr(self):
        # The `extrapolate` flag must propagate through derivative(),
        # antiderivative() and roots().  Test polynomial: [ 1 - x**2 ].
        c = np.array([[-1, 0, 1]]).T
        x = np.array([0, 1])
        for extrapolate in [True, False, None]:
            # None means "use the default", which is to extrapolate.
            pp = PPoly(c, x, extrapolate=extrapolate)
            pp_d = pp.derivative()
            pp_i = pp.antiderivative()

            if extrapolate is False:
                # outside [0, 1] everything is nan, and only the in-range
                # root x == 1 is reported
                assert_(np.isnan(pp([-0.1, 1.1])).all())
                assert_(np.isnan(pp_i([-0.1, 1.1])).all())
                assert_(np.isnan(pp_d([-0.1, 1.1])).all())
                assert_equal(pp.roots(), [1])
            else:
                assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
                assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
                assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
                assert_allclose(pp.roots(), [1, -1])
class TestBPoly(TestCase):
    """Construction and evaluation of Bernstein-basis piecewise polynomials.

    The ``test_simpleN`` cases hand-evaluate the Bernstein expansion
    sum_j c[j] * C(n, j) * (1-s)**(n-j) * s**j  on a single interval.
    """

    def test_simple(self):
        # degree 0: constant
        x = [0, 1]
        c = [[3]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.1), 3.)

    def test_simple2(self):
        # degree 1
        x = [0, 1]
        c = [[3], [1]]
        bp = BPoly(c, x)   # 3*(1-x) + 1*x
        assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)

    def test_simple3(self):
        # degree 2
        x = [0, 1]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)   # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
        assert_allclose(bp(0.2),
                        3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)

    def test_simple4(self):
        # degree 3
        x = [0, 1]
        c = [[1], [1], [1], [2]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.3), 0.7**3 +
                                 3 * 0.7**2 * 0.3 +
                                 3 * 0.7 * 0.3**2 +
                             2 * 0.3**3)

    def test_simple5(self):
        # degree 4
        x = [0, 1]
        c = [[1], [1], [8], [2], [1]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.3), 0.7**4 +
                                 4 * 0.7**3 * 0.3 +
                                 8 * 6 * 0.7**2 * 0.3**2 +
                                 2 * 4 * 0.7 * 0.3**3 +
                                 0.3**4)

    def test_multi_shape(self):
        # trailing coefficient dimensions are appended to the point shape
        c = np.random.rand(6, 2, 1, 2, 3)
        x = np.array([0, 0.5, 1])
        p = BPoly(c, x)
        assert_equal(p.x.shape, x.shape)
        assert_equal(p.c.shape, c.shape)
        assert_equal(p(0.3).shape, c.shape[2:])
        assert_equal(p(np.random.rand(5,6)).shape,
                     (5,6)+c.shape[2:])

        # derivative drops the polynomial order by one
        dp = p.derivative()
        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))

    def test_interval_length(self):
        # evaluation rescales x to the local parameter s in [0, 1]
        x = [0, 2]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)
        xval = 0.1
        s = xval / 2  # s = (x - xa) / (xb - xa)
        assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)

    def test_two_intervals(self):
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
        assert_allclose(bp(0.4), 3 * 0.6*0.6)
        assert_allclose(bp(1.7), 2 * (0.7/2)**2)

    def test_extrapolate_attr(self):
        # extrapolate flag must propagate to derivative polynomials too
        x = [0, 2]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)

        for extrapolate in (True, False, None):
            bp = BPoly(c, x, extrapolate=extrapolate)
            bp_d = bp.derivative()
            if extrapolate is False:
                assert_(np.isnan(bp([-0.1, 2.1])).all())
                assert_(np.isnan(bp_d([-0.1, 2.1])).all())
            else:
                assert_(not np.isnan(bp([-0.1, 2.1])).any())
                assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
class TestBPolyCalculus(TestCase):
    """Derivatives, antiderivatives and integrals of BPoly objects,
    cross-checked against the power-basis (PPoly) implementation."""

    def test_derivative(self):
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
        bp_der = bp.derivative()
        assert_allclose(bp_der(0.4), -6*(0.6))
        assert_allclose(bp_der(1.7), 0.7)

        # derivatives in-place: bp(x, nu=k) evaluates the k-th derivative
        # without constructing a new object
        assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
                        [-6*(1-0.4), 6., 0.])
        assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
                        [0.7, 1., 0])

    def test_derivative_ppoly(self):
        # make sure it's consistent w/ power basis
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)

        # differentiate both representations k times and compare each step
        for d in range(k):
            bp = bp.derivative()
            pp = pp.derivative()
            xp = np.linspace(x[0], x[-1], 21)
            assert_allclose(bp(xp), pp(xp))

    def test_deriv_inplace(self):
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))

        # test both real and complex coefficients
        for cc in [c.copy(), c*(1. + 2.j)]:
            bp = BPoly(cc, x)
            xp = np.linspace(x[0], x[-1], 21)
            for i in range(k):
                # bp(x, nu=i) must match an explicit i-th derivative object
                assert_allclose(bp(xp, i), bp.derivative(i)(xp))

    def test_antiderivative_simple(self):
        # f(x) = x        for x \in [0, 1),
        #        (x-1)/2  for x \in [1, 3]
        #
        # antiderivative is then
        # F(x) = x**2 / 2            for x \in [0, 1),
        #        0.5*x*(x/2 - 1) + A for x \in [1, 3]
        # where A = 3/4 for continuity at x = 1.
        x = [0, 1, 3]
        c = [[0, 0], [1, 1]]

        bp = BPoly(c, x)
        bi = bp.antiderivative()

        xx = np.linspace(0, 3, 11)
        assert_allclose(bi(xx),
                        np.where(xx < 1, xx**2 / 2.,
                                         0.5 * xx * (xx/2. - 1) + 3./4),
                        atol=1e-12, rtol=1e-12)

    def test_der_antider(self):
        # antiderivative followed by derivative is the identity
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10, 2, 3))
        bp = BPoly(c, x)

        xx = np.linspace(x[0], x[-1], 100)
        assert_allclose(bp.antiderivative().derivative()(xx),
                        bp(xx), atol=1e-12, rtol=1e-12)

    def test_antider_ppoly(self):
        # antiderivative agrees with the power-basis implementation
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10, 2, 3))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)

        xx = np.linspace(x[0], x[-1], 10)

        assert_allclose(bp.antiderivative(2)(xx),
                        pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)

    def test_antider_continuous(self):
        # the antiderivative must be continuous at the breakpoints
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10))
        bp = BPoly(c, x).antiderivative()

        xx = bp.x[1:-1]
        assert_allclose(bp(xx - 1e-14),
                        bp(xx + 1e-14), atol=1e-12, rtol=1e-12)

    def test_integrate(self):
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)
        assert_allclose(bp.integrate(0, 1),
                        pp.integrate(0, 1), atol=1e-12, rtol=1e-12)

    def test_integrate_extrap(self):
        c = [[1]]
        x = [0, 1]
        b = BPoly(c, x)

        # default is extrapolate=True
        assert_allclose(b.integrate(0, 2), 2., atol=1e-14)

        # .integrate argument overrides self.extrapolate
        b1 = BPoly(c, x, extrapolate=False)
        assert_(np.isnan(b1.integrate(0, 2)))
        assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)

    def test_antider_neg(self):
        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
        c = [[1]]
        x = [0, 1]
        b = BPoly(c, x)

        xx = np.linspace(0, 1, 21)

        assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
                        atol=1e-12, rtol=1e-12)
        assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
                        atol=1e-12, rtol=1e-12)
class TestPolyConversions(TestCase):
    """Round-trip conversions between the power and Bernstein bases."""

    def test_bp_from_pp(self):
        # power basis -> Bernstein -> power basis must be the identity
        breaks = [0, 1, 3]
        coeffs = [[3, 2], [1, 8], [4, 3]]
        pp = PPoly(coeffs, breaks)
        bp = BPoly.from_power_basis(pp)
        pp_back = PPoly.from_bernstein_basis(bp)

        pts = [0.1, 1.4]
        assert_allclose(pp(pts), bp(pts))
        assert_allclose(pp(pts), pp_back(pts))

    def test_bp_from_pp_random(self):
        np.random.seed(1234)
        n_intervals, order = 5, 8
        breaks = np.sort(np.random.random(n_intervals))
        coeffs = np.random.random((order, n_intervals - 1))
        pp = PPoly(coeffs, breaks)
        bp = BPoly.from_power_basis(pp)
        pp_back = PPoly.from_bernstein_basis(bp)

        pts = np.linspace(breaks[0], breaks[-1], 21)
        assert_allclose(pp(pts), bp(pts))
        assert_allclose(pp(pts), pp_back(pts))

    def test_pp_from_bp(self):
        # Bernstein basis -> power -> Bernstein must be the identity
        breaks = [0, 1, 3]
        coeffs = [[3, 3], [1, 1], [4, 2]]
        bp = BPoly(coeffs, breaks)
        pp = PPoly.from_bernstein_basis(bp)
        bp_back = BPoly.from_power_basis(pp)

        pts = [0.1, 1.4]
        assert_allclose(bp(pts), pp(pts))
        assert_allclose(bp(pts), bp_back(pts))
class TestBPolyFromDerivatives(TestCase):
    """Hermite-type construction: BPoly.from_derivatives builds a piecewise
    polynomial matching prescribed derivatives at the breakpoints.

    The ``_construct_from_derivatives(xa, xb, ya, yb)`` helper returns the
    Bernstein coefficients on a single interval [xa, xb] given the
    derivative values ``ya`` at xa and ``yb`` at xb.
    """

    def test_make_poly_1(self):
        # linear: f(0) = 2, f(1) = 3
        c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
        assert_allclose(c1, [2., 3.])

    def test_make_poly_2(self):
        # quadratic: f(0) = 1, f'(0) = 0, f(1) = 1
        c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
        assert_allclose(c1, [1., 1., 1.])

        # f'(0) = 3
        c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
        assert_allclose(c2, [2., 7./2, 1.])

        # f'(1) = 3
        c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
        assert_allclose(c3, [2., -0.5, 1.])

    def test_make_poly_3(self):
        # f'(0)=2, f''(0)=3
        c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
        assert_allclose(c1, [1., 5./3, 17./6, 4.])

        # f'(1)=2, f''(1)=3
        c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
        assert_allclose(c2, [1., 19./6, 10./3, 4.])

        # f'(0)=2, f'(1)=3
        c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
        assert_allclose(c3, [1., 5./3, 3., 4.])

    def test_make_poly_12(self):
        # 6 derivatives at each end: verify them by repeated differentiation
        np.random.seed(12345)
        ya = np.r_[0, np.random.random(5)]
        yb = np.r_[0, np.random.random(5)]

        c = BPoly._construct_from_derivatives(0, 1, ya, yb)
        pp = BPoly(c[:, None], [0, 1])
        for j in range(6):
            assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
            pp = pp.derivative()

    def test_raise_degree(self):
        # degree elevation must not change the polynomial values
        np.random.seed(12345)
        x = [0, 1]
        k, d = 8, 5
        c = np.random.random((k, 1, 2, 3, 4))
        bp = BPoly(c, x)

        c1 = BPoly._raise_degree(c, d)
        bp1 = BPoly(c1, x)

        xp = np.linspace(0, 1, 11)
        assert_allclose(bp(xp), bp1(xp))

    def test_xi_yi(self):
        # xi and yi must have the same length
        assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])

    def test_coords_order(self):
        # breakpoints must be strictly increasing
        xi = [0, 0, 1]
        yi = [[0], [0], [0]]
        assert_raises(ValueError, BPoly.from_derivatives, xi, yi)

    def test_zeros(self):
        xi = [0, 1, 2, 3]
        yi = [[0, 0], [0], [0, 0], [0, 0]]  # NB: will have to raise the degree
        pp = BPoly.from_derivatives(xi, yi)
        assert_(pp.c.shape == (4, 3))

        ppd = pp.derivative()
        for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
            assert_allclose([pp(xp), ppd(xp)], [0., 0.])

    def _make_random_mk(self, m, k):
        # k derivatives at each breakpoint
        np.random.seed(1234)
        xi = np.asarray([1. * j**2 for j in range(m+1)])
        yi = [np.random.random(k) for j in range(m+1)]
        return xi, yi

    def test_random_12(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)
        pp = BPoly.from_derivatives(xi, yi)

        for order in range(k//2):
            assert_allclose(pp(xi), [yy[order] for yy in yi])
            pp = pp.derivative()

    def test_order_zero(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)
        assert_raises(ValueError, BPoly.from_derivatives,
                **dict(xi=xi, yi=yi, orders=0))

    def test_orders_too_high(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        pp = BPoly.from_derivatives(xi, yi, orders=2*k-1)   # this is still ok
        assert_raises(ValueError, BPoly.from_derivatives,   # but this is not
                **dict(xi=xi, yi=yi, orders=2*k))

    def test_orders_global(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        # ok, this is confusing. Local polynomials will be of the order 5
        # which means that up to the 2nd derivatives will be used at each point
        order = 5
        pp = BPoly.from_derivatives(xi, yi, orders=order)

        for j in range(order//2+1):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))

        # now repeat with `order` being even: on each interval, it uses
        # order//2 'derivatives' @ the right-hand endpoint and
        # order//2+1 @ 'derivatives' the left-hand endpoint
        order = 6
        pp = BPoly.from_derivatives(xi, yi, orders=order)
        for j in range(order//2):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))

    def test_orders_local(self):
        m, k = 7, 12
        xi, yi = self._make_random_mk(m, k)

        # per-breakpoint orders: check continuity up to the matched order
        # at each interior breakpoint, and a break beyond it
        orders = [o + 1 for o in range(m)]
        for i, x in enumerate(xi[1:-1]):
            pp = BPoly.from_derivatives(xi, yi, orders=orders)
            for j in range(orders[i] // 2 + 1):
                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
                pp = pp.derivative()
            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))

    def test_yi_trailing_dims(self):
        # trailing dimensions of yi must carry through to the coefficients
        m, k = 7, 5
        xi = np.sort(np.random.random(m+1))
        yi = np.random.random((m+1, k, 6, 7, 8))
        pp = BPoly.from_derivatives(xi, yi)
        assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
class TestPpform(TestCase):
    """Shape handling of the deprecated ``ppform`` class."""

    def test_shape(self):
        # ppform is deprecated; silence the warning while exercising it.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            np.random.seed(1234)
            coeffs = np.random.rand(3, 12, 5, 6, 7)
            breaks = np.sort(np.random.rand(13))
            poly = ppform(coeffs, breaks)
            pts = np.random.rand(3, 4)
            # trailing coefficient dims are appended to the point shape
            assert_equal(poly(pts).shape, (3, 4, 5, 6, 7))
def _ppoly_eval_1(c, x, xps):
"""Evaluate piecewise polynomial manually"""
out = np.zeros((len(xps), c.shape[2]))
for i, xp in enumerate(xps):
if xp < 0 or xp > 1:
out[i,:] = np.nan
continue
j = np.searchsorted(x, xp) - 1
d = xp - x[j]
assert_(x[j] <= xp < x[j+1])
r = sum(c[k,j] * d**(c.shape[0]-k-1)
for k in range(c.shape[0]))
out[i,:] = r
return out
def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
"""Evaluate piecewise polynomial manually (another way)"""
a = breaks[0]
b = breaks[-1]
K = coeffs.shape[0]
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= a) & (xnew <= b)
res[~mask] = fill
xx = xnew.compress(mask)
indxs = np.searchsorted(breaks, xx)-1
indxs = indxs.clip(0, len(breaks))
pp = coeffs
diff = xx - breaks.take(indxs)
V = np.vander(diff, N=K)
values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in xrange(len(xx))])
res[mask] = values
res.shape = saveshape
return res
class TestRegularGridInterpolator(TestCase):
    """Tests for RegularGridInterpolator on rectilinear grids.

    The 4-d sample grids encode each coordinate in a separate decimal
    digit (values0 + 10*values1 + 100*values2 + 1000*values3), so an
    interpolated value's digits reveal which grid points contributed.
    """

    def _get_sample_4d(self):
        # create a 4d grid of 3 points in each dimension
        points = [(0., .5, 1.)] * 4
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
        return points, values

    def _get_sample_4d_2(self):
        # create another 4d grid of 3 points in each dimension, with the
        # last two axes spanning [0, 10] instead of [0, 1]
        points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
        return points, values

    def test_list_input(self):
        # plain Python lists must work the same as ndarrays
        points, values = self._get_sample_4d()

        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])

        for method in ['linear', 'nearest']:
            interp = RegularGridInterpolator(points,
                                             values.tolist(),
                                             method=method)
            v1 = interp(sample.tolist())
            interp = RegularGridInterpolator(points,
                                             values,
                                             method=method)
            v2 = interp(sample)
            assert_allclose(v1, v2)

    def test_complex(self):
        # complex values interpolate as real + 1j*imag
        points, values = self._get_sample_4d()
        values = values - 2j*values
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])

        for method in ['linear', 'nearest']:
            interp = RegularGridInterpolator(points, values,
                                             method=method)
            rinterp = RegularGridInterpolator(points, values.real,
                                              method=method)
            iinterp = RegularGridInterpolator(points, values.imag,
                                              method=method)

            v1 = interp(sample)
            v2 = rinterp(sample) + 1j*iinterp(sample)
            assert_allclose(v1, v2)

    def test_linear_xi1d(self):
        # a single 1-d point is accepted in place of an array of points
        points, values = self._get_sample_4d_2()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([0.1, 0.1, 10., 9.])
        wanted = 1001.1
        assert_array_almost_equal(interp(sample), wanted)

    def test_linear_xi3d(self):
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        wanted = np.asarray([1001.1, 846.2, 555.5])
        assert_array_almost_equal(interp(sample), wanted)

    def test_nearest(self):
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, method="nearest")
        sample = np.asarray([0.1, 0.1, .9, .9])
        wanted = 1100.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([0.1, 0.1, 0.1, 0.1])
        wanted = 0.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([0., 0., 0., 0.])
        wanted = 0.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([1., 1., 1., 1.])
        wanted = 1111.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([0.1, 0.4, 0.6, 0.9])
        wanted = 1055.
        assert_array_almost_equal(interp(sample), wanted)

    def test_linear_edges(self):
        # exact grid corners must be reproduced exactly
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        wanted = np.asarray([0., 1111.])
        assert_array_almost_equal(interp(sample), wanted)

    def test_valid_create(self):
        # create a 2d grid of 3 points in each dimension
        points = [(0., .5, 1.), (0., 1., .5)]
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis]
        values1 = values[np.newaxis, :]
        values = (values0 + values1 * 10)
        # non-monotonic axis
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        # nested tuple instead of a flat coordinate axis
        points = [((0., .5, 1.), ), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        # axis length mismatching the values shape
        points = [(0., .5, .75, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        # too many axes for the values array
        points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        # unknown interpolation method
        points = [(0., .5, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values,
                      method="undefmethod")

    def test_valid_call(self):
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        # unknown method at call time
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        assert_raises(ValueError, interp, sample, "undefmethod")
        # wrong point dimensionality
        sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
        assert_raises(ValueError, interp, sample)
        # out of bounds with bounds_error=True (the default)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
        assert_raises(ValueError, interp, sample)

    def test_out_of_bounds_extrap(self):
        # fill_value=None enables extrapolation outside the grid
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=None)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([0., 1111., 11., 11.])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
        assert_array_almost_equal(interp(sample, method="linear"), wanted)

    def test_out_of_bounds_extrap2(self):
        points, values = self._get_sample_4d_2()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=None)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([0., 11., 11., 11.])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
        assert_array_almost_equal(interp(sample, method="linear"), wanted)

    def test_out_of_bounds_fill(self):
        # out-of-range points get the fill value; in-range are unaffected
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=np.nan)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([np.nan, np.nan, np.nan])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        assert_array_almost_equal(interp(sample, method="linear"), wanted)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        wanted = np.asarray([1001.1, 846.2, 555.5])
        assert_array_almost_equal(interp(sample), wanted)

    def test_nearest_compare_qhull(self):
        # must agree with the Qhull-based NearestNDInterpolator
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, method="nearest")
        points_qhull = itertools.product(*points)
        points_qhull = [p for p in points_qhull]
        points_qhull = np.asarray(points_qhull)
        values_qhull = values.reshape(-1)
        interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        assert_array_almost_equal(interp(sample), interp_qhull(sample))

    def test_linear_compare_qhull(self):
        # must agree with the Qhull-based LinearNDInterpolator
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        points_qhull = itertools.product(*points)
        points_qhull = [p for p in points_qhull]
        points_qhull = np.asarray(points_qhull)
        values_qhull = values.reshape(-1)
        interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        assert_array_almost_equal(interp(sample), interp_qhull(sample))

    def test_duck_typed_values(self):
        # a non-ndarray indexable (see MyValue below) must be accepted
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)

        values = MyValue((5, 7))

        for method in ('nearest', 'linear'):
            interp = RegularGridInterpolator((x, y), values,
                                             method=method)
            v1 = interp([0.4, 0.7])

            interp = RegularGridInterpolator((x, y), values._v,
                                             method=method)
            v2 = interp([0.4, 0.7])
            assert_allclose(v1, v2)

    def test_invalid_fill_value(self):
        np.random.seed(1234)
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)
        values = np.random.rand(5, 7)

        # integers can be cast to floats
        RegularGridInterpolator((x, y), values, fill_value=1)

        # complex values cannot
        assert_raises(ValueError, RegularGridInterpolator,
                      (x, y), values, fill_value=1+2j)

    def test_fillvalue_type(self):
        # from #3703; test that interpolator object construction succeeds
        # with big-endian value dtypes
        values = np.ones((10, 20, 30), dtype='>f4')
        points = [np.arange(n) for n in values.shape]
        xi = [(1, 1, 1)]
        interpolator = RegularGridInterpolator(points, values)
        interpolator = RegularGridInterpolator(points, values, fill_value=0.)
class MyValue(object):
    """Minimal indexable object backed by a 2-d arange array.

    Mimics just enough of the ndarray interface (ndim, shape, indexing)
    for the interpolators, while refusing implicit array conversion.
    """

    def __init__(self, shape):
        self.ndim = 2
        self.shape = shape
        total = np.prod(shape)
        self._v = np.arange(total).reshape(shape)

    def __getitem__(self, idx):
        # Delegate all indexing to the underlying array.
        return self._v[idx]

    def __array_interface__(self):
        # Deliberately not a usable array interface.
        return None

    def __array__(self):
        # Refuse implicit conversion to ndarray.
        raise RuntimeError("No array representation")
class TestInterpN(TestCase):
    """Tests for the interpn() convenience wrapper: consistency with
    RegularGridInterpolator and RectBivariateSpline, input handling,
    broadcasting, and error conditions."""

    def _sample_2d_data(self):
        x = np.arange(1, 6)
        x = np.array([.5, 2., 3., 4., 5.5])
        y = np.arange(1, 6)
        y = np.array([.5, 2., 3., 4., 5.5])
        z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
                      [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
        return x, y, z

    def test_spline_2d(self):
        # method="splinef2d" must match RectBivariateSpline directly
        x, y, z = self._sample_2d_data()

        lut = RectBivariateSpline(x, y, z)
        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
        assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
                                  lut.ev(xi[:, 0], xi[:, 1]))

    def test_list_input(self):
        # plain Python lists must behave like ndarrays for every method
        x, y, z = self._sample_2d_data()
        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T

        for method in ['nearest', 'linear', 'splinef2d']:
            v1 = interpn((x, y), z, xi, method=method)
            v2 = interpn((x.tolist(), y.tolist()), z.tolist(),
                         xi.tolist(), method=method)
            assert_allclose(v1, v2, err_msg=method)

    def test_spline_2d_outofbounds(self):
        # out-of-range points receive fill_value with bounds_error=False
        x = np.array([.5, 2., 3., 4., 5.5])
        y = np.array([.5, 2., 3., 4., 5.5])
        z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
                      [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
        lut = RectBivariateSpline(x, y, z)
        xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
        actual = interpn((x, y), z, xi, method="splinef2d",
                         bounds_error=False, fill_value=999.99)
        expected = lut.ev(xi[:, 0], xi[:, 1])
        expected[2:4] = 999.99
        assert_array_almost_equal(actual, expected)

        # no extrapolation for splinef2d
        assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
                      bounds_error=False, fill_value=None)

    def _sample_4d_data(self):
        # digit-encoded 4-d grid (see TestRegularGridInterpolator)
        points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
        return points, values

    def test_linear_4d(self):
        # create a 4d grid of 3 points in each dimension
        points, values = self._sample_4d_data()
        interp_rg = RegularGridInterpolator(points, values)
        sample = np.asarray([[0.1, 0.1, 10., 9.]])
        wanted = interpn(points, values, sample, method="linear")
        assert_array_almost_equal(interp_rg(sample), wanted)

    def test_4d_linear_outofbounds(self):
        # create a 4d grid of 3 points in each dimension
        points, values = self._sample_4d_data()
        sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
        wanted = 999.99
        actual = interpn(points, values, sample, method="linear",
                         bounds_error=False, fill_value=999.99)
        assert_array_almost_equal(actual, wanted)

    def test_nearest_4d(self):
        # create a 4d grid of 3 points in each dimension
        points, values = self._sample_4d_data()
        interp_rg = RegularGridInterpolator(points, values, method="nearest")
        sample = np.asarray([[0.1, 0.1, 10., 9.]])
        wanted = interpn(points, values, sample, method="nearest")
        assert_array_almost_equal(interp_rg(sample), wanted)

    def test_4d_nearest_outofbounds(self):
        # create a 4d grid of 3 points in each dimension
        points, values = self._sample_4d_data()
        sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
        wanted = 999.99
        actual = interpn(points, values, sample, method="nearest",
                         bounds_error=False, fill_value=999.99)
        assert_array_almost_equal(actual, wanted)

    def test_xi_1d(self):
        # verify that 1D xi works as expected
        points, values = self._sample_4d_data()
        sample = np.asarray([0.1, 0.1, 10., 9.])
        v1 = interpn(points, values, sample, bounds_error=False)
        v2 = interpn(points, values, sample[None,:], bounds_error=False)
        assert_allclose(v1, v2)

    def test_xi_nd(self):
        # verify that higher-d xi works as expected
        points, values = self._sample_4d_data()

        np.random.seed(1234)
        sample = np.random.rand(2, 3, 4)

        v1 = interpn(points, values, sample, method='nearest',
                     bounds_error=False)
        assert_equal(v1.shape, (2, 3))

        v2 = interpn(points, values, sample.reshape(-1, 4),
                     method='nearest', bounds_error=False)
        assert_allclose(v1, v2.reshape(v1.shape))

    def test_xi_broadcast(self):
        # verify that the interpolators broadcast xi
        x, y, values = self._sample_2d_data()
        points = (x, y)

        xi = np.linspace(0, 1, 2)
        yi = np.linspace(0, 3, 3)

        for method in ['nearest', 'linear', 'splinef2d']:
            sample = (xi[:,None], yi[None,:])
            v1 = interpn(points, values, sample, method=method,
                         bounds_error=False)
            assert_equal(v1.shape, (2, 3))

            xx, yy = np.meshgrid(xi, yi)
            sample = np.c_[xx.T.ravel(), yy.T.ravel()]

            v2 = interpn(points, values, sample,
                         method=method, bounds_error=False)
            assert_allclose(v1, v2.reshape(v1.shape))

    def test_nonscalar_values(self):
        # Verify that non-scalar valued values also works
        points, values = self._sample_4d_data()

        np.random.seed(1234)
        values = np.random.rand(3, 3, 3, 3, 6)
        sample = np.random.rand(7, 11, 4)

        for method in ['nearest', 'linear']:
            v = interpn(points, values, sample, method=method,
                        bounds_error=False)
            assert_equal(v.shape, (7, 11, 6), err_msg=method)

            vs = [interpn(points, values[...,j], sample, method=method,
                          bounds_error=False)
                  for j in range(6)]
            v2 = np.array(vs).transpose(1, 2, 0)

            assert_allclose(v, v2, err_msg=method)

        # Vector-valued splines supported with fitpack
        assert_raises(ValueError, interpn, points, values, sample,
                      method='splinef2d')

    def test_complex(self):
        x, y, values = self._sample_2d_data()
        points = (x, y)
        values = values - 2j*values

        sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                           [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T

        for method in ['linear', 'nearest']:
            v1 = interpn(points, values, sample, method=method)
            v2r = interpn(points, values.real, sample, method=method)
            v2i = interpn(points, values.imag, sample, method=method)
            v2 = v2r + 1j*v2i
            assert_allclose(v1, v2)

        # Complex-valued data not supported by spline2fd
        with warnings.catch_warnings():
            warnings.simplefilter("error", category=np.ComplexWarning)
            assert_raises(np.ComplexWarning, interpn, points, values,
                          sample, method='splinef2d')

    def test_duck_typed_values(self):
        # a non-ndarray indexable (see MyValue above) must be accepted
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)

        values = MyValue((5, 7))

        for method in ('nearest', 'linear'):
            v1 = interpn((x, y), values, [0.4, 0.7], method=method)
            v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
            assert_allclose(v1, v2)

    def test_matrix_input(self):
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)

        values = np.matrix(np.random.rand(5, 7))

        sample = np.random.rand(3, 7, 2)

        for method in ('nearest', 'linear', 'splinef2d'):
            v1 = interpn((x, y), values, sample, method=method)
            v2 = interpn((x, y), np.asarray(values), sample, method=method)
            assert_allclose(v1, np.asmatrix(v2))
if __name__ == "__main__":
    # Allow running this test module as a script (numpy.testing helper).
    run_module_suite()
| bsd-3-clause |
DavidResin/aps-aalto | stitch/cv/lib/python3.4/site-packages/pip/_vendor/lockfile/linklockfile.py | 536 | 2652 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
    """Lock access to a file using atomic property of link(2).

    The lock holder creates a per-process "unique" file and then hard-links
    it to the shared lock file name.  link(2) is atomic (even over NFS), so
    at most one process can create the link; success is confirmed by the
    unique file's link count reaching 2.

    >>> lock = LinkLockFile('somefile')
    >>> lock = LinkLockFile('somefile', threaded=False)
    """

    def acquire(self, timeout=None):
        # Create the per-process unique file that will be hard-linked to
        # the shared lock file.
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)

        # A timeout passed here overrides the instance-level default.
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        # Give up: clean up our unique file before raising.
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # timeout <= 0 means "don't wait at all".
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # NOTE(review): this and/or chain sleeps timeout/10
                    # seconds when a positive timeout was given, and falls
                    # back to 0.1 s when timeout is None (wait forever).
                    time.sleep(timeout is not None and timeout / 10 or 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        # Drop both links; raises if we do not actually hold the lock.
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)

    def is_locked(self):
        # Locked by anyone (not necessarily us).
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        # We hold the lock iff our unique file exists and carries the
        # second link created by acquire().
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)

    def break_lock(self):
        # Forcibly remove the shared lock file regardless of owner.
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
| gpl-3.0 |
endlessm/chromium-browser | third_party/chromite/scripts/sysmon/osinfo_metrics_unittest.py | 1 | 1594 | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for osinfo_metrics."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import print_function
import sys
import mock
from chromite.lib import cros_test_lib
from chromite.scripts.sysmon import osinfo_metrics
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class TestOSInfoMetrics(cros_test_lib.TestCase):
  """Tests for osinfo_metrics."""

  def setUp(self):
    # Replace the ts_mon metric store so collect_os_info() records into a
    # mock we can inspect.
    store_patch = mock.patch('infra_libs.ts_mon.common.interface.state.store',
                             autospec=True)
    self.store = store_patch.start()
    self.addCleanup(store_patch.stop)

  def test_collect(self):
    # Fake a 64-bit Ubuntu 14.04 host and collect OS info metrics.
    with mock.patch('platform.system', autospec=True) as fake_system, \
         mock.patch('platform.dist', autospec=True) as fake_dist, \
         mock.patch('sys.maxsize', 2**64):
      fake_system.return_value = 'Linux'
      fake_dist.return_value = ('Ubuntu', '14.04', 'trusty')
      osinfo_metrics.collect_os_info()

    setter = self.store.set
    expected = [
        mock.call(metric, (), None, value, enforce_ge=mock.ANY)
        for metric, value in (
            ('proc/os/name', 'ubuntu'),
            ('proc/os/version', '14.04'),
            ('proc/os/arch', 'x86_64'),
            ('proc/python/arch', '64'),
        )
    ]
    setter.assert_has_calls(expected)
    # Exactly these metrics, and nothing else, were recorded.
    self.assertEqual(len(setter.mock_calls), len(expected))
| bsd-3-clause |
KasperPRasmussen/bokeh | bokeh/models/tests/test_renderers.py | 20 | 1506 | from __future__ import absolute_import
import unittest
from bokeh.plotting import figure
from bokeh.models.ranges import DataRange1d
class TestGlyphRenderer(unittest.TestCase):
    """Validation of colon-containing categorical range labels.

    Colons are reserved in bokeh categorical labels; the plot's
    _check_colon_in_category_label() validator must flag them with
    error code 1003 (MALFORMED_CATEGORY_LABEL).
    """

    def test_warning_about_colons_in_column_labels_for_axis(self):
        # Both axes use the invalid label '2:0', so both ranges appear
        # in the error text.
        invalid_labels = ['0', '1', '2:0']
        plot = figure(
            x_range=invalid_labels,
            y_range=invalid_labels,
            plot_width=900,
            plot_height=400,
        )

        errors = plot._check_colon_in_category_label()

        self.assertEqual(errors, [(
            1003,
            'MALFORMED_CATEGORY_LABEL',
            'Category labels cannot contain colons',
            '[range:x_range] [first_value: 2:0] '
            '[range:y_range] [first_value: 2:0] '
            '[renderer: Figure, ViewModel:Plot, ref _id: '
            '%s]' % plot._id
        )])

    def test_validates_colons_only_in_factorial_range(self):
        # A numeric (DataRange1d) x-range is exempt; only the categorical
        # y-range is validated.
        plot = figure(
            x_range=DataRange1d(start=0.0, end=2.2),
            y_range=['0', '1', '2:0'],
            plot_width=900,
            plot_height=400,
        )

        errors = plot._check_colon_in_category_label()

        self.assertEqual(errors, [(
            1003,
            'MALFORMED_CATEGORY_LABEL',
            'Category labels cannot contain colons',
            '[range:y_range] [first_value: 2:0] '
            '[renderer: Figure, ViewModel:Plot, ref _id: '
            '%s]' % plot._id
        )])
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
KhalidGit/flask | Work/Trivia - Module 5/env/Lib/site-packages/werkzeug/testsuite/http.py | 145 | 18911 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.http
~~~~~~~~~~~~~~~~~~~~~~~
HTTP parsing utilities.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from datetime import datetime
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug._compat import itervalues, wsgi_encoding_dance
from werkzeug import http, datastructures
from werkzeug.test import create_environ
class HTTPUtilityTestCase(WerkzeugTestCase):
    """Unit tests for the header parsing/dumping helpers in werkzeug.http.

    Each test round-trips one family of HTTP headers (Accept, Cache-Control,
    Authorization, ETag, cookies, ...) through its parse_*/dump_* pair and
    checks the resulting data structure and its to_header() serialization.
    """

    def test_accept(self):
        a = http.parse_accept_header('en-us,ru;q=0.5')
        self.assert_equal(list(itervalues(a)), ['en-us', 'ru'])
        self.assert_equal(a.best, 'en-us')
        self.assert_equal(a.find('ru'), 1)
        self.assert_raises(ValueError, a.index, 'de')
        self.assert_equal(a.to_header(), 'en-us,ru;q=0.5')

    def test_mime_accept(self):
        a = http.parse_accept_header('text/xml,application/xml,'
                                     'application/xhtml+xml,'
                                     'text/html;q=0.9,text/plain;q=0.8,'
                                     'image/png,*/*;q=0.5',
                                     datastructures.MIMEAccept)
        self.assert_raises(ValueError, lambda: a['missing'])
        self.assert_equal(a['image/png'], 1)
        self.assert_equal(a['text/plain'], 0.8)
        # unknown types fall back to the */* wildcard quality
        self.assert_equal(a['foo/bar'], 0.5)
        self.assert_equal(a[a.find('foo/bar')], ('*/*', 0.5))

    def test_accept_matches(self):
        a = http.parse_accept_header('text/xml,application/xml,application/xhtml+xml,'
                                     'text/html;q=0.9,text/plain;q=0.8,'
                                     'image/png', datastructures.MIMEAccept)
        self.assert_equal(a.best_match(['text/html', 'application/xhtml+xml']),
                          'application/xhtml+xml')
        self.assert_equal(a.best_match(['text/html']), 'text/html')
        self.assert_true(a.best_match(['foo/bar']) is None)
        self.assert_equal(a.best_match(['foo/bar', 'bar/foo'],
                                       default='foo/bar'), 'foo/bar')
        self.assert_equal(a.best_match(['application/xml', 'text/xml']), 'application/xml')

    def test_charset_accept(self):
        a = http.parse_accept_header('ISO-8859-1,utf-8;q=0.7,*;q=0.7',
                                     datastructures.CharsetAccept)
        # charset lookups are case- and hyphen-insensitive
        self.assert_equal(a['iso-8859-1'], a['iso8859-1'])
        self.assert_equal(a['iso-8859-1'], 1)
        self.assert_equal(a['UTF8'], 0.7)
        self.assert_equal(a['ebcdic'], 0.7)

    def test_language_accept(self):
        a = http.parse_accept_header('de-AT,de;q=0.8,en;q=0.5',
                                     datastructures.LanguageAccept)
        self.assert_equal(a.best, 'de-AT')
        self.assert_true('de_AT' in a)
        self.assert_true('en' in a)
        self.assert_equal(a['de-at'], 1)
        self.assert_equal(a['en'], 0.5)

    def test_set_header(self):
        hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe')
        self.assert_true('blah baz' in hs)
        self.assert_true('foobar' not in hs)
        self.assert_true('foo' in hs)
        self.assert_equal(list(hs), ['foo', 'Bar', 'Blah baz', 'Hehe'])
        # adding an already-present value (case-insensitively) is a no-op
        hs.add('Foo')
        self.assert_equal(hs.to_header(), 'foo, Bar, "Blah baz", Hehe')

    def test_list_header(self):
        hl = http.parse_list_header('foo baz, blah')
        self.assert_equal(hl, ['foo baz', 'blah'])

    def test_dict_header(self):
        d = http.parse_dict_header('foo="bar baz", blah=42')
        self.assert_equal(d, {'foo': 'bar baz', 'blah': '42'})

    def test_cache_control_header(self):
        cc = http.parse_cache_control_header('max-age=0, no-cache')
        assert cc.max_age == 0
        assert cc.no_cache
        cc = http.parse_cache_control_header('private, community="UCI"', None,
                                             datastructures.ResponseCacheControl)
        assert cc.private
        assert cc['community'] == 'UCI'
        c = datastructures.ResponseCacheControl()
        assert c.no_cache is None
        assert c.private is None
        # boolean cache-control attributes serialize as '*' when set
        c.no_cache = True
        assert c.no_cache == '*'
        c.private = True
        assert c.private == '*'
        del c.private
        assert c.private is None
        assert c.to_header() == 'no-cache'

    def test_authorization_header(self):
        a = http.parse_authorization_header('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
        assert a.type == 'basic'
        assert a.username == 'Aladdin'
        assert a.password == 'open sesame'
        a = http.parse_authorization_header('''Digest username="Mufasa",
                                               realm="testrealm@host.invalid",
                                               nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
                                               uri="/dir/index.html",
                                               qop=auth,
                                               nc=00000001,
                                               cnonce="0a4f113b",
                                               response="6629fae49393a05397450978507c4ef1",
                                               opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
        assert a.type == 'digest'
        assert a.username == 'Mufasa'
        assert a.realm == 'testrealm@host.invalid'
        assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
        assert a.uri == '/dir/index.html'
        assert 'auth' in a.qop
        assert a.nc == '00000001'
        assert a.cnonce == '0a4f113b'
        assert a.response == '6629fae49393a05397450978507c4ef1'
        assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
        # digest auth without the optional qop/nc/cnonce parameters
        a = http.parse_authorization_header('''Digest username="Mufasa",
                                               realm="testrealm@host.invalid",
                                               nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
                                               uri="/dir/index.html",
                                               response="e257afa1414a3340d93d30955171dd0e",
                                               opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
        assert a.type == 'digest'
        assert a.username == 'Mufasa'
        assert a.realm == 'testrealm@host.invalid'
        assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
        assert a.uri == '/dir/index.html'
        assert a.response == 'e257afa1414a3340d93d30955171dd0e'
        assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
        # malformed/empty values parse to None instead of raising
        assert http.parse_authorization_header('') is None
        assert http.parse_authorization_header(None) is None
        assert http.parse_authorization_header('foo') is None

    def test_www_authenticate_header(self):
        wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"')
        assert wa.type == 'basic'
        assert wa.realm == 'WallyWorld'
        wa.realm = 'Foo Bar'
        assert wa.to_header() == 'Basic realm="Foo Bar"'
        wa = http.parse_www_authenticate_header('''Digest
                                                   realm="testrealm@host.com",
                                                   qop="auth,auth-int",
                                                   nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
                                                   opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
        assert wa.type == 'digest'
        assert wa.realm == 'testrealm@host.com'
        assert 'auth' in wa.qop
        assert 'auth-int' in wa.qop
        assert wa.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
        assert wa.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
        wa = http.parse_www_authenticate_header('broken')
        assert wa.type == 'broken'
        assert not http.parse_www_authenticate_header('').type
        assert not http.parse_www_authenticate_header('')

    def test_etags(self):
        assert http.quote_etag('foo') == '"foo"'
        assert http.quote_etag('foo', True) == 'w/"foo"'
        assert http.unquote_etag('"foo"') == ('foo', False)
        assert http.unquote_etag('w/"foo"') == ('foo', True)
        es = http.parse_etags('"foo", "bar", w/"baz", blar')
        assert sorted(es) == ['bar', 'blar', 'foo']
        assert 'foo' in es
        # weak etags are excluded from strong membership tests
        assert 'baz' not in es
        assert es.contains_weak('baz')
        assert 'blar' in es
        assert es.contains_raw('w/"baz"')
        assert es.contains_raw('"foo"')
        assert sorted(es.to_header().split(', ')) == ['"bar"', '"blar"', '"foo"', 'w/"baz"']

    def test_etags_nonzero(self):
        etags = http.parse_etags('w/"foo"')
        self.assert_true(bool(etags))
        self.assert_true(etags.contains_raw('w/"foo"'))

    def test_parse_date(self):
        # RFC 1123, RFC 850 and asctime() formats are all accepted
        assert http.parse_date('Sun, 06 Nov 1994 08:49:37 GMT    ') == datetime(1994, 11, 6, 8, 49, 37)
        assert http.parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == datetime(1994, 11, 6, 8, 49, 37)
        assert http.parse_date(' Sun Nov  6 08:49:37 1994') == datetime(1994, 11, 6, 8, 49, 37)
        assert http.parse_date('foo') is None

    def test_parse_date_overflows(self):
        assert http.parse_date(' Sun 02 Feb 1343 08:49:37 GMT') == datetime(1343, 2, 2, 8, 49, 37)
        assert http.parse_date('Thu, 01 Jan 1970 00:00:00 GMT') == datetime(1970, 1, 1, 0, 0)
        # out-of-range day numbers are rejected, not wrapped
        assert http.parse_date('Thu, 33 Jan 1970 00:00:00 GMT') is None

    def test_remove_entity_headers(self):
        now = http.http_date()
        headers1 = [('Date', now), ('Content-Type', 'text/html'), ('Content-Length', '0')]
        headers2 = datastructures.Headers(headers1)
        http.remove_entity_headers(headers1)
        assert headers1 == [('Date', now)]
        http.remove_entity_headers(headers2)
        self.assert_equal(headers2, datastructures.Headers([(u'Date', now)]))

    def test_remove_hop_by_hop_headers(self):
        headers1 = [('Connection', 'closed'), ('Foo', 'bar'),
                    ('Keep-Alive', 'wtf')]
        headers2 = datastructures.Headers(headers1)
        http.remove_hop_by_hop_headers(headers1)
        assert headers1 == [('Foo', 'bar')]
        http.remove_hop_by_hop_headers(headers2)
        assert headers2 == datastructures.Headers([('Foo', 'bar')])

    def test_parse_options_header(self):
        assert http.parse_options_header(r'something; foo="other\"thing"') == \
            ('something', {'foo': 'other"thing'})
        assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == \
            ('something', {'foo': 'other"thing', 'meh': '42'})
        assert http.parse_options_header(r'something; foo="other\"thing"; meh=42; bleh') == \
            ('something', {'foo': 'other"thing', 'meh': '42', 'bleh': None})
        assert http.parse_options_header('something; foo="other;thing"; meh=42; bleh') == \
            ('something', {'foo': 'other;thing', 'meh': '42', 'bleh': None})
        assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == \
            ('something', {'foo': 'otherthing', 'meh': None, 'bleh': None})

    def test_dump_options_header(self):
        assert http.dump_options_header('foo', {'bar': 42}) == \
            'foo; bar=42'
        # dict ordering is not guaranteed, so accept both serializations
        assert http.dump_options_header('foo', {'bar': 42, 'fizz': None}) in \
            ('foo; bar=42; fizz', 'foo; fizz; bar=42')

    def test_dump_header(self):
        assert http.dump_header([1, 2, 3]) == '1, 2, 3'
        assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"'
        assert http.dump_header({'foo': 'bar'}, allow_token=False) == 'foo="bar"'
        assert http.dump_header({'foo': 'bar'}) == 'foo=bar'

    def test_is_resource_modified(self):
        env = create_environ()
        # ignore POST
        env['REQUEST_METHOD'] = 'POST'
        assert not http.is_resource_modified(env, etag='testing')
        env['REQUEST_METHOD'] = 'GET'
        # etagify from data
        self.assert_raises(TypeError, http.is_resource_modified, env,
                           data='42', etag='23')
        env['HTTP_IF_NONE_MATCH'] = http.generate_etag(b'awesome')
        assert not http.is_resource_modified(env, data=b'awesome')
        env['HTTP_IF_MODIFIED_SINCE'] = http.http_date(datetime(2008, 1, 1, 12, 30))
        assert not http.is_resource_modified(env,
                                             last_modified=datetime(2008, 1, 1, 12, 00))
        assert http.is_resource_modified(env,
                                         last_modified=datetime(2008, 1, 1, 13, 00))

    def test_date_formatting(self):
        # cookie dates use dashes, regular HTTP dates use spaces
        assert http.cookie_date(0) == 'Thu, 01-Jan-1970 00:00:00 GMT'
        assert http.cookie_date(datetime(1970, 1, 1)) == 'Thu, 01-Jan-1970 00:00:00 GMT'
        assert http.http_date(0) == 'Thu, 01 Jan 1970 00:00:00 GMT'
        assert http.http_date(datetime(1970, 1, 1)) == 'Thu, 01 Jan 1970 00:00:00 GMT'

    def test_cookies(self):
        self.assert_strict_equal(
            dict(http.parse_cookie('dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cd'
                                   'c762809248d4beed; a=42; b="\\\";"')),
            {
                'CP':           u'null*',
                'PHPSESSID':    u'0a539d42abc001cdc762809248d4beed',
                'a':            u'42',
                'dismiss-top':  u'6',
                'b':            u'\";'
            }
        )
        self.assert_strict_equal(
            set(http.dump_cookie('foo', 'bar baz blub', 360, httponly=True,
                                 sync_expires=False).split(u'; ')),
            set([u'HttpOnly', u'Max-Age=360', u'Path=/', u'foo="bar baz blub"'])
        )
        self.assert_strict_equal(dict(http.parse_cookie('fo234{=bar; blub=Blah')),
                                 {'fo234{': u'bar', 'blub': u'Blah'})

    def test_cookie_quoting(self):
        val = http.dump_cookie("foo", "?foo")
        self.assert_strict_equal(val, 'foo="?foo"; Path=/')
        self.assert_strict_equal(dict(http.parse_cookie(val)), {'foo': u'?foo'})
        # octal escape sequences in cookie values are decoded
        self.assert_strict_equal(dict(http.parse_cookie(r'foo="foo\054bar"')),
                                 {'foo': u'foo,bar'})

    def test_cookie_domain_resolving(self):
        # non-ASCII domains are IDNA (punycode) encoded
        val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
        self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/')

    def test_cookie_unicode_dumping(self):
        val = http.dump_cookie('foo', u'\N{SNOWMAN}')
        h = datastructures.Headers()
        h.add('Set-Cookie', val)
        self.assert_equal(h['Set-Cookie'], 'foo="\\342\\230\\203"; Path=/')
        cookies = http.parse_cookie(h['Set-Cookie'])
        self.assert_equal(cookies['foo'], u'\N{SNOWMAN}')

    def test_cookie_unicode_keys(self):
        # Yes, this is technically against the spec but happens
        val = http.dump_cookie(u'fö', u'fö')
        self.assert_equal(val, wsgi_encoding_dance(u'fö="f\\303\\266"; Path=/', 'utf-8'))
        cookies = http.parse_cookie(val)
        self.assert_equal(cookies[u'fö'], u'fö')

    def test_cookie_unicode_parsing(self):
        # This is actually a correct test.  This is what is being submitted
        # by firefox if you set an unicode cookie and we get the cookie sent
        # in on Python 3 under PEP 3333.
        cookies = http.parse_cookie(u'fö=fö')
        self.assert_equal(cookies[u'fö'], u'fö')

    def test_cookie_domain_encoding(self):
        val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
        self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
        val = http.dump_cookie('foo', 'bar', domain=u'.\N{SNOWMAN}.com')
        self.assert_strict_equal(val, 'foo=bar; Domain=.xn--n3h.com; Path=/')
        val = http.dump_cookie('foo', 'bar', domain=u'.foo.com')
        self.assert_strict_equal(val, 'foo=bar; Domain=.foo.com; Path=/')
class RangeTestCase(WerkzeugTestCase):
    """Tests for If-Range header parsing."""

    def test_if_range_parsing(self):
        # (input header, expected etag, expected date, expected round-trip)
        cases = [
            ('"Test"', 'Test', None, '"Test"'),
            # weak information is dropped
            ('w/"Test"', 'Test', None, '"Test"'),
            # broken etags are supported too
            ('bullshit', 'bullshit', None, '"bullshit"'),
            # a date-based If-Range has no etag at all
            ('Thu, 01 Jan 1970 00:00:00 GMT', None, datetime(1970, 1, 1),
             'Thu, 01 Jan 1970 00:00:00 GMT'),
            # empty or missing headers produce an empty IfRange
            ('', None, None, ''),
            (None, None, None, ''),
        ]
        for header, expected_etag, expected_date, round_trip in cases:
            rv = http.parse_if_range_header(header)
            assert rv.etag == expected_etag
            assert rv.date == expected_date
            assert rv.to_header() == round_trip
def test_range_parsing():
    """Round-trip Range headers through http.parse_range_header."""
    # a bare value without a dash is not a valid range at all
    assert http.parse_range_header('bytes=52') is None

    # (header, expected units, expected [(start, stop)], canonical form);
    # note stop values are exclusive while the header syntax is inclusive
    cases = [
        ('bytes=52-', 'bytes', [(52, None)], 'bytes=52-'),
        ('bytes=52-99', 'bytes', [(52, 100)], 'bytes=52-99'),
        ('bytes=52-99,-1000', 'bytes', [(52, 100), (-1000, None)],
         'bytes=52-99,-1000'),
        # stray whitespace is tolerated and normalized away
        ('bytes = 1 - 100', 'bytes', [(1, 101)], 'bytes=1-100'),
        # units are lower-cased
        ('AWesomes=0-999', 'awesomes', [(0, 1000)], 'awesomes=0-999'),
    ]
    for header, units, ranges, canonical in cases:
        parsed = http.parse_range_header(header)
        assert parsed.units == units
        assert parsed.ranges == ranges
        assert parsed.to_header() == canonical
def test_content_range_parsing():
    """Round-trip Content-Range headers through http.parse_content_range_header."""
    # unknown total length ('*'); stop is exclusive, header syntax inclusive
    parsed = http.parse_content_range_header('bytes 0-98/*')
    assert parsed.units == 'bytes'
    assert parsed.start == 0
    assert parsed.stop == 99
    assert parsed.length is None
    assert parsed.to_header() == 'bytes 0-98/*'

    # trailing garbage invalidates the whole header
    assert http.parse_content_range_header('bytes 0-98/*asdfsa') is None

    # clearing start/stop serializes as an unsatisfied-range form
    parsed = http.parse_content_range_header('bytes 0-99/100')
    assert parsed.to_header() == 'bytes 0-99/100'
    parsed.start = None
    parsed.stop = None
    assert parsed.units == 'bytes'
    assert parsed.to_header() == 'bytes */100'

    # and the unsatisfied-range form parses back with only units and length
    parsed = http.parse_content_range_header('bytes */100')
    assert parsed.start is None
    assert parsed.stop is None
    assert parsed.length == 100
    assert parsed.units == 'bytes'
class RegressionTestCase(WerkzeugTestCase):
    """Regression tests for previously-fixed bugs."""

    def test_best_match_works(self):
        # was a bug in 0.6
        header_value = ('foo=,application/xml,application/xhtml+xml,'
                        'text/html;q=0.9,text/plain;q=0.8,'
                        'image/png,*/*;q=0.5')
        accept = http.parse_accept_header(header_value,
                                          datastructures.MIMEAccept)
        self.assert_equal(accept.best_match(['foo/bar']), 'foo/bar')
def suite():
    """Build the unittest suite for this module.

    ``RangeTestCase`` is defined above but was previously never added here,
    so its unittest-style test silently did not run when tests were
    collected via this suite; it is now included.  The module-level
    ``test_range_parsing``/``test_content_range_parsing`` functions are
    plain pytest-style tests and are intentionally not part of the suite.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(HTTPUtilityTestCase))
    suite.addTest(unittest.makeSuite(RangeTestCase))
    suite.addTest(unittest.makeSuite(RegressionTestCase))
    return suite
| apache-2.0 |
tbeadle/django | tests/proxy_model_inheritance/tests.py | 89 | 2089 | from __future__ import absolute_import, unicode_literals
import os
from django.core.management import call_command
from django.test import TestCase, TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils._os import upath
from .models import (
ConcreteModel, ConcreteModelSubclass, ConcreteModelSubclassProxy,
ProxyModel,
)
class ProxyModelInheritanceTests(TransactionTestCase):
    """
    Proxy model inheritance across apps can result in migrate not creating the table
    for the proxied model (as described in #12286). This test creates two dummy
    apps and calls migrate, then verifies that the table has been created.
    """
    available_apps = []

    def test_table_exists(self):
        tests_dir = os.path.dirname(os.path.abspath(upath(__file__)))
        with extend_sys_path(tests_dir):
            with self.modify_settings(INSTALLED_APPS={'append': ['app1', 'app2']}):
                # run_syncdb creates tables for apps that have no migrations
                call_command('migrate', verbosity=0, run_syncdb=True)
                from app1.models import ProxyModel
                from app2.models import NiceModel
                self.assertEqual(NiceModel.objects.all().count(), 0)
                self.assertEqual(ProxyModel.objects.all().count(), 0)
class MultiTableInheritanceProxyTest(TestCase):
    """Cascade-deletion behavior of proxies over multi-table inheritance."""

    def test_model_subclass_proxy(self):
        """
        Deleting an instance of a model proxying a multi-table inherited
        subclass should cascade delete down the whole inheritance chain (see
        #18083).
        """
        ConcreteModelSubclassProxy.objects.create().delete()
        # Every level of the inheritance chain must have been emptied.
        for model in (ConcreteModelSubclassProxy, ConcreteModelSubclass,
                      ConcreteModel):
            self.assertEqual(0, model.objects.count())

    def test_deletion_through_intermediate_proxy(self):
        """Deleting via a proxy of the parent also removes the child rows."""
        child = ConcreteModelSubclass.objects.create()
        ProxyModel.objects.get(pk=child.pk).delete()
        self.assertFalse(ConcreteModel.objects.exists())
        self.assertFalse(ConcreteModelSubclass.objects.exists())
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/terminal/pt_inputhooks/pyglet.py | 2 | 2367 | """Enable pyglet to be used interacively with prompt_toolkit
"""
import sys
import time
from timeit import default_timer as clock
import pyglet
# On linux only, window.flip() has a bug that causes an AttributeError on
# window close. For details, see:
# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
if sys.platform.startswith('linux'):
    def flip(window):
        # Swallow the AttributeError raised on window close by the pyglet
        # bug referenced in the comment above; other errors propagate.
        try:
            window.flip()
        except AttributeError:
            pass
else:
    def flip(window):
        window.flip()
def inputhook(context):
    """Run the pyglet event loop by processing pending events only.

    This keeps processing pending events until stdin is ready. After
    processing all pending events, a call to time.sleep is inserted. This is
    needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
    though for best performance.
    """
    # We need to protect against a user pressing Control-C when IPython is
    # idle and this is running. We trap KeyboardInterrupt and pass.
    try:
        start = clock()
        while not context.input_is_ready():
            pyglet.clock.tick()
            for window in pyglet.app.windows:
                window.switch_to()
                window.dispatch_events()
                window.dispatch_event('on_draw')
                flip(window)

            # Sleep to keep the idle CPU load low while still keeping the
            # GUI responsive: the longer we have been idle, the longer we
            # allow ourselves to sleep.  Rough CPU-load measurements for
            # various sleep durations, useful for tuning:
            #   0.001 -> 13%, 0.005 -> 3%, 0.01 -> 1.5%, 0.05 -> 0.5%
            idle_time = clock() - start
            if idle_time > 10.0:
                time.sleep(1.0)
            elif idle_time > 0.1:
                # Few GUI events coming in, so we can sleep longer.
                time.sleep(0.05)
            else:
                # Many GUI events coming in, so sleep only very little.
                time.sleep(0.001)
    except KeyboardInterrupt:
        pass
| bsd-2-clause |
hyesun03/k-board | kboard/functional_test/test_post_creation_and_management.py | 1 | 10220 | from selenium.common.exceptions import NoSuchElementException
from .base import FunctionalTest, logout_current_user, login_test_user_with_browser
class NewVisitorTest(FunctionalTest):
    """Selenium user-story tests for the board app, following a visitor
    named Jihun through browsing, posting, commenting, and logging out."""

    def test_default_page(self):
        # Jihun hears that a cool board app has been released and visits
        # the website to check it out.
        self.browser.get(self.live_server_url)
        # The page title displays 'Home'.
        self.assertIn('Home', self.browser.title)
        # The logo in the header navbar reads 'K-Board'.
        logo_text = self.browser.find_element_by_class_name('navbar-brand')
        self.assertEqual('K-Board', logo_text.text)
        # The 'Default' board appears in the navbar.
        navbar_item = self.browser.find_elements_by_class_name('navbar-item')
        self.assertEqual('Default', navbar_item[0].text)
        # A single board panel is visible on the page.
        boards = self.browser.find_elements_by_class_name('panel-post-summary')
        self.assertEqual(len(boards), 1)
        # That board is labeled 'Default'.
        panel_title = boards[0].find_element_by_css_selector('.panel-heading > a')
        self.assertEqual(panel_title.text, 'Default')
        # Jihun enters the first board, 'Default'.
        self.move_to_default_board()
        # The board contains no posts yet.
        tbody = self.browser.find_element_by_tag_name('tbody')
        with self.assertRaises(NoSuchElementException):
            tbody.find_element_by_tag_name('tr')
        # He writes one post.
        self.add_post('Hello', 'Hello guys')
        # To see whether there are other boards, Jihun clicks the logo
        # button to return to the board list page.
        home_button = self.browser.find_element_by_class_name('navbar-brand')
        home_button.click()
        # The URL is now the site root (/).
        self.assertRegex(self.browser.current_url, '.+/$')
        # The post he wrote is shown in the Default board panel.
        boards = self.browser.find_elements_by_class_name('panel-post-summary')
        panel_title = boards[0].find_element_by_css_selector('.panel-heading > a')
        panel_posts = boards[0].find_elements_by_css_selector('table tr')
        self.assertEqual(panel_title.text, 'Default')
        self.assertEqual(len(panel_posts), 1)
        self.assertEqual(panel_posts[0].text, 'Hello')

    @login_test_user_with_browser
    def test_write_post_and_confirm_post_view(self):
        self.move_to_default_board()
        # Jihun clicks the write button to create a new post.
        self.click_create_post_button()
        # He is taken to the post creation page.
        self.assertRegex(self.browser.current_url, '.+/boards/default/posts/new/')
        # The page title and header display the write-post heading.
        header_text = self.browser.find_element_by_tag_name('h3').text
        self.assertIn('글 쓰기', self.browser.title)
        self.assertIn('글 쓰기', header_text)
        # The title input box shows the placeholder 'Insert Title'.
        titlebox = self.browser.find_element_by_id('id_post_title')
        self.assertEqual(
            titlebox.get_attribute('placeholder'),
            'Insert Title'
        )
        # He types "Title of This Post" into the title box.
        titlebox.send_keys('Title of This Post')
        contentbox = self.get_contentbox()
        # He types "Content of This Post" into the content box.
        contentbox.send_keys('Content of This Post')
        self.browser.switch_to.default_content()
        # Clicking the submit button at the bottom completes the post and
        # returns him to the post list.
        self.click_submit_button()
        self.assertRegex(self.browser.current_url, '.+/boards/default/')
        # The post list page's title and header say 'Default'.
        header_text = self.browser.find_element_by_tag_name('h3').text
        self.assertIn('Default', self.browser.title)
        self.assertIn('Default', header_text)
        # The post list shows 'Title of This Post'.
        self.check_for_row_in_list_table('id_post_list_table', 'Title of This Post')
        # He clicks the write button at the bottom of the list to create
        # another post.
        self.click_create_post_button()
        # He types "Title of Second Post" into the title box.
        titlebox = self.browser.find_element_by_id('id_post_title')
        titlebox.send_keys('Title of Second Post')
        # He types "Content of Second Post" into the content box.
        contentbox = self.get_contentbox()
        contentbox.send_keys('Content of Second Post')
        self.browser.switch_to.default_content()
        # Clicking the submit button completes the post and returns him to
        # the post list.
        self.click_submit_button()
        self.assertRegex(self.browser.current_url, '.+/boards/default/')
        # Both post titles now appear in the post list.
        self.check_for_row_in_list_table('id_post_list_table', 'Title of Second Post')
        self.check_for_row_in_list_table('id_post_list_table', 'Title of This Post')
        # Jihun wants to confirm that the post was created properly.
        # He clicks the 'Title of This Post' entry.
        table = self.browser.find_element_by_id('id_post_list_table')
        rows = table.find_elements_by_css_selector('tbody > tr > td > a')
        rows[1].click()
        # A page showing the post's details opens.
        self.assertRegex(self.browser.current_url, '.+/posts/(\d+)/')
        # The post page's title reads 'Title of This Post'.
        self.assertIn('Title of This Post', self.browser.title)
        # The post's title shows 'Title of This Post'
        post_title = self.browser.find_element_by_css_selector('.post-panel .panel-title').text
        self.assertIn('Title of This Post', post_title)
        # and its body shows 'Content of This Post'.
        post_content = self.browser.find_element_by_css_selector('.post-panel .panel-body').text
        self.assertIn('Content of This Post', post_content)
        # The poster's (partially masked) IP is displayed next to the title.
        post_ip = self.browser.find_element_by_id('id_post_ip').text
        self.assertRegex(post_ip, 'IP: \d{1,3}\.\d{1,3}\.xxx\.\d{1,3}')
        # Jihun types 'This is a comment' into the comment field below the
        # post content (the comment UI lives inside an iframe).
        comment_iframe = self.browser.find_element_by_class_name('comment-iframe')
        self.browser.switch_to.frame(comment_iframe)
        comment = self.browser.find_element_by_id('id_new_comment')
        comment.send_keys('This is a comment')
        # He clicks the add-comment button.
        comment_submit = self.browser.find_element_by_id('id_new_comment_submit')
        comment_submit.click()
        # The comment is added and 'This is a comment' is visible.
        comment_list = self.browser.find_element_by_class_name("comment")
        comments = comment_list.find_elements_by_tag_name('p')
        self.assertEqual(comments[0].text, 'This is a comment')
        # The comment shows the time it was written.
        comment_date = comment_list.find_element_by_class_name('comment-date')
        self.assertRegex(comment_date.text, '\d{4}-[01]\d-[0-3]\d [0-2]\d:[0-5]\d:[0-5]\d')
        # The comment also shows a (partially masked) IP.
        comment_ip = comment_list.find_element_by_class_name('comment-ip')
        self.assertRegex(comment_ip.text, '\d{1,3}\.\d{1,3}\.xxx\.\d{1,3}')
        # Disliking the comment, he decides to delete it again; he clicks
        # the delete button to the right of the comment.
        remove_comment_button = self.browser.find_element_by_class_name("delete-comment")
        remove_comment_button.click()
        # He confirms that no comments remain.
        self.browser.find_elements_by_css_selector(".no-comment")
        # Having confirmed the comment was deleted, Jihun clicks the list
        # button at the bottom of the post to return to the post list page.
        self.browser.switch_to.default_content()
        create_post_button = self.browser.find_element_by_id('id_back_to_post_list_button')
        create_post_button.click()
        # The post list page appears.
        self.assertRegex(self.browser.current_url, '.+/boards/default/$')
        # To check the cancel feature while writing a new post, Jihun
        # clicks the write button again,
        self.click_create_post_button()
        # then clicks the cancel button,
        self.browser.find_element_by_id('id_cancel_button').click()
        # which returns him to the post list page.
        self.assertRegex(self.browser.current_url, '.+/boards/default/$')

    @login_test_user_with_browser
    def test_forbid_comment_input_when_does_not_login(self):
        # Jihun writes a post while logged in.
        self.move_to_default_board()
        self.add_post('hello', 'content')
        # The post list page is being shown.
        self.assertRegex(self.browser.current_url, '.+/boards/default/$')
        # Wanting to comment anonymously, he logs out.
        logout_current_user(self)
        # He enters the board.
        self.move_to_default_board()
        # He opens the post.
        post_list = self.browser.find_elements_by_css_selector('#id_post_list_table > tbody > tr > td > a')
        post_list[0].click()
        # There is no comment input form; instead a "log in to comment"
        # message is shown inside the comment iframe.
        comment_iframe = self.browser.find_element_by_class_name('comment-iframe')
        self.browser.switch_to.frame(comment_iframe)
        self.browser.find_element_by_class_name('comment-require-login')
| mit |
cubicova17/annet | venv/lib/python2.7/site-packages/south/tests/inspector.py | 142 | 3896 |
from south.tests import Monkeypatcher, skipUnless
from south.modelsinspector import (convert_on_delete_handler, get_value,
IsDefault, models, value_clean)
from fakeapp.models import HorribleModel, get_sentinel_object
# on_delete handlers (PROTECT, SET_NULL, ...) only exist on Django >= 1.3;
# tests that exercise them are skipped on older Django versions.
on_delete_is_available = hasattr(models, "PROTECT") # models here is django.db.models
skipUnlessOnDeleteAvailable = skipUnless(on_delete_is_available, "not testing on_delete -- not available on Django<1.3")
class TestModelInspector(Monkeypatcher):
    """
    Tests if the various parts of the modelinspector work.
    """

    def test_get_value(self):
        """get_value() renders a field keyword as its source-code string, or
        raises IsDefault when the value matches the declared default."""
        # Let's start nicely.
        name = HorribleModel._meta.get_field_by_name("name")[0]
        slug = HorribleModel._meta.get_field_by_name("slug")[0]
        user = HorribleModel._meta.get_field_by_name("user")[0]
        # Simple int retrieval
        self.assertEqual(
            get_value(name, ["max_length", {}]),
            "255",
        )
        # Bool retrieval
        self.assertEqual(
            get_value(slug, ["unique", {}]),
            "True",
        )
        # String retrieval
        self.assertEqual(
            get_value(user, ["rel.related_name", {}]),
            "'horribles'",
        )
        # Default triggering
        self.assertEqual(
            get_value(slug, ["unique", {"default": False}]),
            "True",
        )
        self.assertRaises(
            IsDefault,
            get_value,
            slug,
            ["unique", {"default": True}],
        )

    @skipUnlessOnDeleteAvailable
    def test_get_value_on_delete(self):
        """Each on_delete handler should render as the matching models.*
        expression; CASCADE is the default and raises IsDefault."""
        # First validate the FK fields with on_delete options
        o_set_null_on_delete = HorribleModel._meta.get_field_by_name("o_set_null_on_delete")[0]
        o_cascade_delete = HorribleModel._meta.get_field_by_name("o_cascade_delete")[0]
        o_protect = HorribleModel._meta.get_field_by_name("o_protect")[0]
        o_default_on_delete = HorribleModel._meta.get_field_by_name("o_default_on_delete")[0]
        o_set_on_delete_function = HorribleModel._meta.get_field_by_name("o_set_on_delete_function")[0]
        o_set_on_delete_value = HorribleModel._meta.get_field_by_name("o_set_on_delete_value")[0]
        o_no_action_on_delete = HorribleModel._meta.get_field_by_name("o_no_action_on_delete")[0]

        # TODO this is repeated from the introspection_details in modelsinspector:
        # better to refactor that so we can reference these settings, in case they
        # must change at some point.
        on_delete = ["rel.on_delete", {"default": models.CASCADE, "is_django_function": True, "converter": convert_on_delete_handler, }]

        # Foreign Key cascade update/delete
        self.assertRaises(
            IsDefault,
            get_value,
            o_cascade_delete,
            on_delete,
        )
        self.assertEqual(
            get_value(o_protect, on_delete),
            "models.PROTECT",
        )
        self.assertEqual(
            get_value(o_no_action_on_delete, on_delete),
            "models.DO_NOTHING",
        )
        self.assertEqual(
            get_value(o_set_null_on_delete, on_delete),
            "models.SET_NULL",
        )
        self.assertEqual(
            get_value(o_default_on_delete, on_delete),
            "models.SET_DEFAULT",
        )
        # For now o_set_on_delete raises, see modelsinspector.py
        #self.assertEqual(
        #    get_value(o_set_on_delete_function, on_delete),
        #    "models.SET(get_sentinel_object)",
        #)
        self.assertRaises(
            ValueError,
            get_value,
            o_set_on_delete_function,
            on_delete,
        )
        self.assertEqual(
            get_value(o_set_on_delete_value, on_delete),
            "models.SET(%s)" % value_clean(get_sentinel_object()),
        )
| mit |
teochenglim/ansible-modules-extras | cloud/amazon/lambda_event.py | 16 | 14517 | #!/usr/bin/python
# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
try:
    import boto3
    from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
    HAS_BOTO3 = True
except ImportError:
    # boto3 availability is checked at runtime so the module can fail with a
    # helpful error message instead of an import traceback.
    HAS_BOTO3 = False
DOCUMENTATION = '''
---
module: lambda_event
short_description: Creates, updates or deletes AWS Lambda function event mappings.
description:
- This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
AWS Lambda invokes the function.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself and M(lambda_alias) to manage function aliases.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
options:
lambda_function_arn:
description:
- The name or ARN of the lambda function.
required: true
aliases: ['function_name', 'function_arn']
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
required: true
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
required: false
event_source:
description:
- Source of the event that triggers the lambda function.
required: false
default: stream
choices: ['stream']
source_params:
description:
- Sub-parameters required for event source.
- I(== stream event source ==)
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
time of invoking your function. Default is 100.
- C(starting_position) The position in the stream where AWS Lambda should start reading.
Choices are TRIM_HORIZON or LATEST.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Example that creates a lambda event notification for a DynamoDB stream
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: DynamoDB stream event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: stream
function_name: "{{ function_name }}"
alias: Dev
source_params:
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
enabled: True
batch_size: 100
starting_position: TRIM_HORIZON
- name: show source event
debug: var=lambda_stream_events
'''
RETURN = '''
---
lambda_stream_events:
description: list of dictionaries returned by the API describing stream event mappings
returned: success
type: list
'''
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
class AWSConnection:
    """
    Create the connection object and client objects as required.

    Thin wrapper over get_aws_connection_info()/boto3_conn() (star-imported
    from ansible.module_utils.ec2 at the bottom of this module) that caches
    one boto3 client per requested service name.
    """
    def __init__(self, ansible_obj, resources, use_boto3=True):
        # :param ansible_obj: AnsibleModule instance (used for fail_json on error)
        # :param resources: list of boto3 service names to build clients for;
        #                   an 'iam' client is always appended so the account
        #                   ID can be resolved below.
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
            self.resource_client = dict()
            if not resources:
                resources = ['lambda']
            resources.append('iam')
            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name
        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
        # set account ID
        # Best-effort: parsed from the caller's IAM user ARN. Left empty when
        # the identity is not an IAM user (e.g. an assumed role) or the call
        # fails; validate_params() then builds an ARN with an empty account.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''
    def client(self, resource='lambda'):
        # Return the cached boto3 client for *resource*; raises KeyError if it
        # was not requested at construction time.
        return self.resource_client[resource]
def pc(key):
    """
    Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case string to convert
    :return: PascalCase string
    """
    pieces = []
    for token in key.split('_'):
        pieces.append(token.capitalize())
    return "".join(pieces)
def ordered_obj(obj):
    """
    Order object for comparison purposes

    Recursively converts dicts to sorted (key, value) pair lists and sorts
    lists, so two structurally-equal objects compare equal regardless of
    original ordering. Scalars are returned unchanged.

    :param obj: object to canonicalize
    :return: canonically ordered representation
    """
    if isinstance(obj, dict):
        pairs = [(key, ordered_obj(value)) for key, value in obj.items()]
        return sorted(pairs)
    if isinstance(obj, list):
        return sorted([ordered_obj(item) for item in obj])
    return obj
def set_api_sub_params(params):
    """
    Sets module sub-parameters to those expected by the boto3 API.

    Keys are converted from snake_case to PascalCase (same transform as pc());
    entries with falsy values (None, 0, '', False) are dropped, matching the
    truthiness filter used elsewhere in this module.

    :param params: dict of snake_case sub-parameters
    :return: dict of PascalCase API parameters
    """
    api_params = dict()
    for name, value in params.items():
        if value:
            pascal_name = "".join(token.capitalize() for token in name.split('_'))
            api_params[pascal_name] = value
    return api_params
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Also normalizes module.params['lambda_function_arn'] in place: a bare
    function name is expanded to a full ARN using the connection's region and
    account ID, and a version/alias qualifier is appended when one is given.

    :param module: AnsibleModule instance
    :param aws: AWSConnection instance
    :return: None
    """
    function_name = module.params['lambda_function_arn']
    # validate function name
    # Raw string: '\-' in a plain literal is an invalid escape sequence and
    # raises a DeprecationWarning on Python 3.
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )
    # NOTE(review): this check also runs against fully-qualified ARNs, which
    # can legitimately exceed 64 characters — confirm whether the limit should
    # apply to the bare function name only.
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
    # check if 'function_name' needs to be expanded in full ARN format
    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
        function_name = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
    # Append ':<version>' or ':<alias>' so all subsequent API calls target
    # the qualified function.
    qualifier = get_qualifier(module)
    if qualifier:
        function_arn = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
    return
def get_qualifier(module):
    """
    Returns the function qualifier as a version or alias or None.

    A positive 'version' takes precedence over 'alias' (the two are declared
    mutually exclusive in main()).

    :param module: AnsibleModule instance
    :return: qualifier string or None
    """
    version = module.params['version']
    if version > 0:
        return str(version)
    alias = module.params['alias']
    if alias:
        return str(alias)
    return None
# ---------------------------------------------------------------------------------------------------
#
# Lambda Event Handlers
#
# This section defines a lambda_event_X function where X is an AWS service capable of initiating
# the execution of a Lambda function (pull only).
#
# ---------------------------------------------------------------------------------------------------
def lambda_event_stream(module, aws):
    """
    Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.

    Idempotent: compares the desired mapping with any existing mapping on the
    function and only calls the AWS API when something has to change. Honors
    Ansible check mode (reports 'changed' without calling the API).

    :param module: AnsibleModule instance
    :param aws: AWSConnection instance
    :return: dict suitable for module.exit_json (keys: changed, events)
    """
    client = aws.client('lambda')
    facts = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']
    api_params = dict(FunctionName=module.params['lambda_function_arn'])
    # check if required sub-parameters are present and valid
    source_params = module.params['source_params']
    source_arn = source_params.get('source_arn')
    if source_arn:
        api_params.update(EventSourceArn=source_arn)
    else:
        module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
    # check if optional sub-parameters are valid, if present
    batch_size = source_params.get('batch_size')
    if batch_size:
        try:
            source_params['batch_size'] = int(batch_size)
        except ValueError:
            module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
    # optional boolean value needs special treatment as not present does not imply False
    source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
    # check if event mapping exist
    try:
        facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
        if facts:
            current_state = 'present'
    except ClientError as e:
        module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
    if state == 'present':
        if current_state == 'absent':
            # Create a brand-new mapping; StartingPosition is mandatory here
            # (but not on update, where AWS rejects it).
            starting_position = source_params.get('starting_position')
            if starting_position:
                api_params.update(StartingPosition=starting_position)
            else:
                module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
            if source_arn:
                api_params.update(Enabled=source_param_enabled)
            if source_params.get('batch_size'):
                api_params.update(BatchSize=source_params.get('batch_size'))
            try:
                if not module.check_mode:
                    facts = client.create_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
        else:
            # current_state is 'present'
            # Update the existing mapping in place, keyed by its UUID, but
            # only when batch size or enabled state actually differ.
            api_params = dict(FunctionName=module.params['lambda_function_arn'])
            current_mapping = facts[0]
            api_params.update(UUID=current_mapping['UUID'])
            mapping_changed = False
            # check if anything changed
            if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
                api_params.update(BatchSize=source_params['batch_size'])
                mapping_changed = True
            if source_param_enabled is not None:
                if source_param_enabled:
                    # 'Enabling'/'Disabling' are transitional API states;
                    # treat them as already matching the target to avoid
                    # issuing redundant updates.
                    if current_mapping['State'] not in ('Enabled', 'Enabling'):
                        api_params.update(Enabled=True)
                        mapping_changed = True
                else:
                    if current_mapping['State'] not in ('Disabled', 'Disabling'):
                        api_params.update(Enabled=False)
                        mapping_changed = True
            if mapping_changed:
                try:
                    if not module.check_mode:
                        facts = client.update_event_source_mapping(**api_params)
                    changed = True
                except (ClientError, ParamValidationError, MissingParametersError) as e:
                    module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
    else:
        if current_state == 'present':
            # remove the stream event mapping
            api_params = dict(UUID=facts[0]['UUID'])
            try:
                if not module.check_mode:
                    facts = client.delete_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
    return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
def main():
    """Module entry point: validate parameters and dispatch to the handler for the selected event source."""
    # Handlers follow the naming convention lambda_event_<source>; the chosen
    # one is looked up on this module object by suffix below.
    this_module = sys.modules[__name__]
    source_choices = ["stream"]
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
            # NOTE(review): DOCUMENTATION declares event_source optional with
            # default 'stream', but it is required=True here — confirm intent.
            event_source=dict(required=True, default="stream", choices=source_choices),
            source_params=dict(type='dict', required=True, default=None),
            alias=dict(required=False, default=None),
            version=dict(type='int', required=False, default=0),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['alias', 'version']],
        required_together=[]
    )
    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')
    aws = AWSConnection(module, ['lambda'])
    # Normalizes lambda_function_arn in place (full ARN + qualifier).
    validate_params(module, aws)
    this_module_function = getattr(this_module, 'lambda_event_{}'.format(module.params['event_source'].lower()))
    results = this_module_function(module, aws)
    module.exit_json(**results)
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/autobahntestsuite/case/case9_2_6.py | 14 | 1173 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_2_1 import *
class Case9_2_6(Case9_2_1):
   # Parameter-only subclass: all send/verify logic is inherited from
   # Case9_2_1; only payload size and timeout differ.
   DESCRIPTION = """Send binary message message with payload of length 16 * 2**20 (16M)."""
   EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""
   def init(self):
      # 16 MiB payload; the 5-byte PAYLOAD pattern is presumably tiled up to
      # DATALEN by the base case (defined in Case9_2_1, not visible here —
      # confirm). WAITSECS is generous to accommodate the large transfer.
      self.DATALEN = 16 * 2**20
      self.PAYLOAD = "\x00\xfe\x23\xfa\xf0"
      self.WAITSECS = 100
      self.reportTime = True
| apache-2.0 |
girving/tensorflow | tensorflow/python/keras/engine/input_layer.py | 4 | 8466 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Input layer code (`Input` and `InputLayer`).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.InputLayer')
class InputLayer(base_layer.Layer):
  """Layer to be used as an entry point into a Network (a graph of layers).
  It can either wrap an existing tensor (pass an `input_tensor` argument)
  or create a placeholder tensor (pass arguments `input_shape`, and
  optionally, `dtype`).
  It is generally recommended to use the functional layer API via `Input`,
  (which creates an `InputLayer`) without directly using `InputLayer`.
  Arguments:
      input_shape: Shape tuple (not including the batch axis), or `TensorShape`
        instance (not including the batch axis).
      batch_size: Optional input batch size (integer or None).
      dtype: Datatype of the input.
      input_tensor: Optional tensor to use as layer input
          instead of creating a placeholder.
      sparse: Boolean, whether the placeholder created
          is meant to be sparse.
      name: Name of the layer (string).
  """
  def __init__(self,
               input_shape=None,
               batch_size=None,
               dtype=None,
               input_tensor=None,
               sparse=False,
               name=None,
               **kwargs):
    # Legacy 'batch_input_shape' kwarg (Keras 1 style): split it into
    # batch_size + input_shape.
    if 'batch_input_shape' in kwargs:
      batch_input_shape = kwargs.pop('batch_input_shape')
      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      batch_size = batch_input_shape[0]
      input_shape = batch_input_shape[1:]
    if kwargs:
      raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
    if not name:
      prefix = 'input'
      name = prefix + '_' + str(K.get_uid(prefix))
    # dtype default: inferred from input_tensor when given, else backend floatx.
    if not dtype:
      if input_tensor is None:
        dtype = K.floatx()
      else:
        dtype = K.dtype(input_tensor)
    super(InputLayer, self).__init__(dtype=dtype, name=name)
    self.built = True
    self.sparse = sparse
    self.batch_size = batch_size
    self.supports_masking = True
    if isinstance(input_shape, tensor_shape.TensorShape):
      input_shape = tuple(input_shape.as_list())
    if input_tensor is None:
      if input_shape is not None:
        batch_input_shape = (batch_size,) + tuple(input_shape)
      else:
        batch_input_shape = None
      if context.executing_eagerly():
        # In eager mode, create a temporary placeholder to call the layer on.
        input_tensor = base_layer.DeferredTensor(  # pylint: disable=protected-access
            shape=batch_input_shape,
            dtype=dtype,
            name=self.name)
      else:
        # In graph mode, create a graph placeholder to call the layer on.
        if sparse:
          input_tensor = array_ops.sparse_placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)
        else:
          input_tensor = array_ops.placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)
      # For compatibility with Keras API.
      self.is_placeholder = True
      self._batch_input_shape = batch_input_shape
    else:
      # For compatibility with Keras API.
      self.is_placeholder = False
      self._batch_input_shape = tuple(input_tensor.get_shape().as_list())
      if context.executing_eagerly():
        raise ValueError('You should not pass an input tensor when executing '
                         'in eager mode. For example, instead of creating an '
                         'InputLayer, you should instantiate your model and '
                         'directly call it on your input.')
    # Create an input node to add to self.outbound_node
    # and set output_tensors' _keras_history.
    input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
    base_layer.Node(
        self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=[input_tensor],
        output_tensors=[input_tensor])
  def get_config(self):
    # Serializable layer config; mirrors the constructor arguments so the
    # layer can be re-created by the functional-model deserializer.
    config = {
        'batch_input_shape': self._batch_input_shape,
        'dtype': self.dtype,
        'sparse': self.sparse,
        'name': self.name
    }
    return config
@tf_export('keras.layers.Input', 'keras.Input')
def Input(  # pylint: disable=invalid-name
    shape=None,
    batch_size=None,
    name=None,
    dtype=None,
    sparse=False,
    tensor=None,
    **kwargs):
  """`Input()` is used to instantiate a Keras tensor.
  A Keras tensor is a tensor object from the underlying backend
  (Theano or TensorFlow), which we augment with certain
  attributes that allow us to build a Keras model
  just by knowing the inputs and outputs of the model.
  For instance, if a, b and c are Keras tensors,
  it becomes possible to do:
  `model = Model(input=[a, b], output=c)`
  The added Keras attribute is:
      `_keras_history`: Last layer applied to the tensor.
          the entire layer graph is retrievable from that layer,
          recursively.
  Arguments:
      shape: A shape tuple (integers), not including the batch size.
          For instance, `shape=(32,)` indicates that the expected input
          will be batches of 32-dimensional vectors.
      batch_size: optional static batch size (integer).
      name: An optional name string for the layer.
          Should be unique in a model (do not reuse the same name twice).
          It will be autogenerated if it isn't provided.
      dtype: The data type expected by the input, as a string
          (`float32`, `float64`, `int32`...)
      sparse: A boolean specifying whether the placeholder
          to be created is sparse.
      tensor: Optional existing tensor to wrap into the `Input` layer.
          If set, the layer will not create a placeholder tensor.
      **kwargs: deprecated arguments support.
  Returns:
      A tensor.
  Example:
  ```python
  # this is a logistic regression in Keras
  x = Input(shape=(32,))
  y = Dense(16, activation='softmax')(x)
  model = Model(x, y)
  ```
  Raises:
      ValueError: in case of invalid arguments.
  """
  # Legacy 'batch_shape' kwarg (Keras 1 style): split into batch_size + shape.
  if 'batch_shape' in kwargs:
    batch_shape = kwargs.pop('batch_shape')
    if shape and batch_shape:
      raise ValueError('Only provide the shape OR '
                       'batch_shape argument to '
                       'Input, not both at the same time.')
    batch_size = batch_shape[0]
    shape = batch_shape[1:]
  if kwargs:
    raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
  if dtype is None:
    dtype = K.floatx()
  if shape is None and tensor is None:
    raise ValueError('Please provide to Input either a `shape`'
                     ' or a `tensor` argument. Note that '
                     '`shape` does not include the batch '
                     'dimension.')
  # The InputLayer constructor builds the placeholder (or wraps `tensor`)
  # and registers the input node; we only need its output tensor(s).
  input_layer = InputLayer(
      input_shape=shape,
      batch_size=batch_size,
      name=name,
      dtype=dtype,
      sparse=sparse,
      input_tensor=tensor)
  # Return tensor including `_keras_history`.
  # Note that in this case train_output and test_output are the same pointer.
  outputs = input_layer._inbound_nodes[0].output_tensors
  if len(outputs) == 1:
    return outputs[0]
  else:
    return outputs
| apache-2.0 |
richardcs/ansible | lib/ansible/modules/cloud/google/gcp_compute_region_disk_facts.py | 9 | 10094 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_region_disk_facts
description:
- Gather facts for GCP RegionDisk
short_description: Gather facts for GCP RegionDisk
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).)
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
region:
description:
- A reference to the region where the disk resides.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a region disk facts
gcp_compute_region_disk_facts:
region: us-central1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
lastAttachTimestamp:
description:
- Last attach timestamp in RFC3339 text format.
returned: success
type: str
lastDetachTimestamp:
description:
- Last dettach timestamp in RFC3339 text format.
returned: success
type: str
labels:
description:
- Labels to apply to this disk. A list of key->value pairs.
returned: success
type: dict
licenses:
description:
- Any applicable publicly visible licenses.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sizeGb:
description:
- Size of the persistent disk, specified in GB. You can specify this field when
creating a persistent disk using the sourceImage or sourceSnapshot parameter,
or specify it alone to create an empty persistent disk.
- If you specify this field along with sourceImage or sourceSnapshot, the value
of sizeGb must not be less than the size of the sourceImage or the size of
the snapshot.
returned: success
type: int
users:
description:
- 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
.'
returned: success
type: list
replicaZones:
description:
- URLs of the zones where the disk should be replicated to.
returned: success
type: list
type:
description:
- URL of the disk type resource describing which disk type to use to create
the disk. Provide this when creating the disk.
returned: success
type: str
region:
description:
- A reference to the region where the disk resides.
returned: success
type: str
diskEncryptionKey:
description:
- Encrypts the disk using a customer-supplied encryption key.
- After you encrypt a disk with a customer-supplied key, you must provide the
same key if you use the disk later (e.g. to create a disk snapshot or an image,
or to attach the disk to a virtual machine).
- Customer-supplied encryption keys do not protect access to metadata of the
disk.
- If you do not provide an encryption key when creating the disk, then the disk
will be encrypted using an automatically generated key and you do not need
to provide a key to use the disk later.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceSnapshot:
description:
- The source snapshot used to create this disk. You can provide this as a partial
or full URL to the resource.
returned: success
type: dict
sourceSnapshotEncryptionKey:
description:
- The customer-supplied encryption key of the source snapshot. Required if the
source snapshot is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceSnapshotId:
description:
- The unique ID of the snapshot used to create this disk. This value identifies
the exact snapshot that was used to create this persistent disk. For example,
if you created the persistent disk from a snapshot that was later deleted
and recreated under the same name, the source snapshot ID would identify the
exact version of the snapshot that was used.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Gather facts for GCP RegionDisk resources matching the given filters."""
    module = GcpModule(
        argument_spec=dict(
            filters=dict(type='list', elements='str'),
            region=dict(required=True, type='str')
        )
    )
    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    items = fetch_list(module, collection(module), query_options(module.params['filters']))
    # The API returns the disk list under 'items'; normalize a missing or
    # empty key to an empty list.
    if items.get('items'):
        items = items.get('items')
    else:
        items = []
    return_value = {
        'items': items
    }
    module.exit_json(**return_value)
def collection(module):
    """Return the regionDisks list endpoint URL for the module's project and region."""
    template = "https://www.googleapis.com/compute/v1/projects/%(project)s/regions/%(region)s/disks"
    return template % {'project': module.params['project'],
                       'region': module.params['region']}
def fetch_list(module, link, query):
    """GET *link* with the combined filter expression *query* and return the parsed JSON body (or fail the module)."""
    auth = GcpSession(module, 'compute')
    response = auth.get(link, params={'filter': query})
    return return_if_object(module, response)
def query_options(filters):
    """Combine the module's filter list into a single API filter expression.

    A single filter is passed through verbatim; multiple filters are ANDed by
    joining them with spaces, wrapping each in parentheses unless it already
    appears parenthesized.
    """
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    wrapped = [f if (f[0] == '(' or f[-1] == ')') else '(%s)' % f
               for f in filters]
    return ' '.join(wrapped)
def return_if_object(module, response):
    """Validate an API *response*: return parsed JSON, None for 404/204, or fail the module on error."""
    # If not found, return nothing.
    if response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    # json.decoder.JSONDecodeError does not exist on Python 2; fall back to
    # its base class ValueError there.
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    # A 200 body can still carry an API-level error payload.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
if __name__ == "__main__":
main()
| gpl-3.0 |
patriciolobos/desa8 | openerp/addons/hr_attendance/__init__.py | 434 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
azumimuo/family-xbmc-addon | plugin.video.elysium/resources/lib/sources/myddl.py | 1 | 8226 | # -*- coding: utf-8 -*-
'''
Elysiumtester Add-on
Copyright (C) 2017 Elysiumtester
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,random
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import control
debridstatus = control.setting('debridsources')
from BeautifulSoup import BeautifulSoup
from resources.lib.modules.common import random_agent, quality_tag
import requests
from schism_commons import quality_tag, google_tag, parseDOM, replaceHTMLCodes ,cleantitle_get, cleantitle_get_2, cleantitle_query, get_size, cleantitle_get_full
class source:
    """Scraper for myddl.pw: finds debrid-host links for movies and episodes."""
    def __init__(self):
        self.domains = ['tinydl.com']
        self.base_link = 'http://myddl.pw'
        # Search endpoint: '<title>+<year>' for movies, '<title>+SxxEyy' for episodes.
        self.search_link = '/?s=%s+%s'
    def movie(self, imdb, title, year):
        """Search the site for a movie; collect [url, post_title] pairs on self.elysium_url.

        Returns the list of matches, or None on any failure (best-effort).
        """
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            cleanmovie = cleantitle.get(title)
            title = cleantitle.getsearch(title)
            titlecheck = cleanmovie + year
            # BUGFIX: the original interpolated the undefined name 'ep_search'
            # (only defined in episode()), so every movie search raised a
            # NameError that the bare except silently swallowed. Movies are
            # searched by year, matching titlecheck (cleanmovie + year).
            query = self.search_link % (urllib.quote_plus(title), year)
            query = urlparse.urljoin(self.base_link, query)
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('h2', attrs={'class': 'title'})
            for result in containers:
                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle_get_2(r_title)
                if titlecheck in c_title:
                    self.elysium_url.append([r_href, r_title])
            return self.elysium_url
        except Exception:
            # Best-effort: any scrape failure yields no sources.
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Pack show identification into a url-encoded string for episode() to parse."""
        try:
            url = {'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Search the site for a SxxEyy episode; collect [url, post_title] pairs on self.elysium_url."""
        self.elysium_url = []
        try:
            if not debridstatus == 'true': raise Exception()
            self.elysium_url = []
            headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            cleanmovie = cleantitle.get(title)
            data['season'], data['episode'] = season, episode
            ep_search = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            episodecheck = str(ep_search).lower()
            titlecheck = cleanmovie + episodecheck
            query = self.search_link % (urllib.quote_plus(title), ep_search)
            query = urlparse.urljoin(self.base_link, query)
            html = BeautifulSoup(requests.get(query, headers=headers, timeout=10).content)
            containers = html.findAll('h2', attrs={'class': 'title'})
            for result in containers:
                r_title = result.findAll('a')[0]
                r_title = r_title.string
                r_href = result.findAll('a')[0]["href"]
                r_href = r_href.encode('utf-8')
                r_title = r_title.encode('utf-8')
                c_title = cleantitle.get(r_title)
                if titlecheck in c_title:
                    self.elysium_url.append([r_href, r_title])
            return self.elysium_url
        except Exception:
            return
    def sources(self, url, hostDict, hostprDict):
        """Scrape each collected post for external links on premium (debrid) hosts.

        Returns a list of source dicts (may be partial if a post fails mid-scrape).
        """
        sources = []
        try:
            for movielink, quality in self.elysium_url:
                quality = quality_tag(quality)
                request = client.request(movielink, timeout="5")
                match = re.compile(r'<a href="(.+?)" rel=".+?" data-wpel-link="external"').findall(request)
                for url in match:
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    # Only keep links on premium hosts (debrid-only sources).
                    if any(value in url for value in hostprDict):
                        try:
                            # Derive 'host.tld' from the link's netloc.
                            host = re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                        except Exception:
                            host = 'Videomega'
                        sources.append({'source': host, 'quality': quality, 'provider': 'Myddl', 'url': url, 'direct': False, 'debridonly': True})
            return sources
        except Exception:
            return sources
    def resolve(self, url):
        """Links are already direct host URLs; nothing to resolve."""
        return url
def _getDOMContent(html, name, match, ret):
end_str = "</%s" % (name)
start_str = '<%s' % (name)
start = html.find(match)
end = html.find(end_str, start)
pos = html.find(start_str, start + 1)
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(end_str, end + len(end_str))
if tend != -1:
end = tend
pos = html.find(start_str, pos + 1)
if start == -1 and end == -1:
result = ''
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
else:
result = ''
if ret:
endstr = html[end:html.find(">", html.find(end_str)) + 1]
result = match + result + endstr
return result
def _getDOMAttributes(match, name, ret):
pattern = '''<%s[^>]* %s\s*=\s*(?:(['"])(.*?)\\1|([^'"].*?)(?:>|\s))''' % (name, ret)
results = re.findall(pattern, match, re.I | re.M | re.S)
return [result[1] if result[1] else result[2] for result in results]
def _getDOMElements(item, name, attrs):
if not attrs:
pattern = '(<%s(?: [^>]*>|/?>))' % (name)
this_list = re.findall(pattern, item, re.M | re.S | re.I)
else:
last_list = None
for key in attrs:
pattern = '''(<%s [^>]*%s=['"]%s['"][^>]*>)''' % (name, key, attrs[key])
this_list = re.findall(pattern, item, re.M | re. S | re.I)
if not this_list and ' ' not in attrs[key]:
pattern = '''(<%s [^>]*%s=%s[^>]*>)''' % (name, key, attrs[key])
this_list = re.findall(pattern, item, re.M | re. S | re.I)
if last_list is None:
last_list = this_list
else:
last_list = [item for item in this_list if item in last_list]
this_list = last_list
return this_list
def parse_dom(html, name='', attrs=None, ret=False):
if attrs is None: attrs = {}
if isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
print "none"
try:
html = [html.decode("utf-8", "replace")]
except:
html = [html]
elif isinstance(html, unicode):
html = [html]
elif not isinstance(html, list):
return ''
if not name.strip():
return ''
if not isinstance(attrs, dict):
return ''
ret_lst = []
for item in html:
for match in re.findall('(<[^>]*\n[^>]*>)', item):
item = item.replace(match, match.replace('\n', ' ').replace('\r', ' '))
lst = _getDOMElements(item, name, attrs)
if isinstance(ret, str):
lst2 = []
for match in lst:
lst2 += _getDOMAttributes(match, name, ret)
lst = lst2
else:
lst2 = []
for match in lst:
temp = _getDOMContent(item, name, match, ret).strip()
item = item[item.find(temp, item.find(match)):]
lst2.append(temp)
lst = lst2
ret_lst += lst
# log_utils.log("Done: " + repr(ret_lst), xbmc.LOGDEBUG)
return ret_lst
| gpl-2.0 |
rabipanda/tensorflow | tensorflow/contrib/tpu/python/tpu/tpu_infeed_test.py | 85 | 5729 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for TPU InfeedQueue methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class InfeedTest(test.TestCase):
  """Unit tests for tpu_feed.InfeedQueue construction, reconfiguration
  and freezing."""

  def testConstructor(self):
    """Tests that the constructor can be called with different arguments."""
    i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
    self.assertEqual(i.number_of_tuple_elements, 2)
    self.assertEqual(i.tuple_types, None)
    self.assertEqual(i.tuple_shapes, None)
    self.assertEqual(i.number_of_shards, None)
    i = tpu_feed.InfeedQueue(
        tuple_types=[dtypes.float32, dtypes.int32, dtypes.int32])
    self.assertEqual(i.number_of_tuple_elements, 3)
    self.assertEqual(i.tuple_types,
                     [dtypes.float32, dtypes.int32, dtypes.int32])
    self.assertEqual(i.tuple_shapes, None)
    self.assertEqual(i.number_of_shards, None)
    i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]])
    self.assertEqual(i.number_of_tuple_elements, 2)
    self.assertEqual(i.tuple_types, None)
    self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
    self.assertEqual(i.number_of_shards, None)
    i = tpu_feed.InfeedQueue(shard_dimensions=[1, 0, 7])
    self.assertEqual(i.number_of_tuple_elements, 3)
    self.assertEqual(i.tuple_types, None)
    self.assertEqual(i.tuple_shapes, None)
    self.assertEqual([p.shard_dimension
                      for p in i.sharding_policies], [1, 0, 7])
    # Missing or mutually inconsistent sizing arguments are rejected.
    with self.assertRaises(ValueError):
      i = tpu_feed.InfeedQueue()
    with self.assertRaises(ValueError):
      i = tpu_feed.InfeedQueue(
          number_of_tuple_elements=2, tuple_types=[dtypes.float32])
    with self.assertRaises(ValueError):
      i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, tuple_shapes=[[1]])
    with self.assertRaises(ValueError):
      i = tpu_feed.InfeedQueue(number_of_tuple_elements=2, shard_dimensions=[1])
    with self.assertRaises(ValueError):
      i = tpu_feed.InfeedQueue(tuple_shapes=[[1], [2, 3]], shard_dimensions=[1])

  def testModification(self):
    """Tests modification of the queue post-construction."""
    i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
    i.set_tuple_types([dtypes.float32, dtypes.int32])
    self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
    i.set_tuple_types([dtypes.float32, dtypes.float32])
    self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.float32])
    # The number of tuple elements is fixed; mismatched lists raise.
    with self.assertRaises(ValueError):
      i.set_tuple_types([dtypes.float32])
    i.set_tuple_shapes([[1], [2, 3]])
    self.assertEqual(i.tuple_shapes, [[1], [2, 3]])
    i.set_tuple_shapes([[1, 2], [3, 4]])
    self.assertEqual(i.tuple_shapes, [[1, 2], [3, 4]])
    with self.assertRaises(ValueError):
      i.set_tuple_shapes([[1, 2]])
    i.set_number_of_shards(2)
    self.assertEqual(i.number_of_shards, 2)
    i.set_number_of_shards(3)
    self.assertEqual(i.number_of_shards, 3)
    # Configuration can also be inferred from example input tensors.
    t1 = constant_op.constant(1, dtypes.int32, shape=[6])
    t2 = constant_op.constant(2.0, dtypes.float32, shape=[3, 18])
    i.set_configuration_from_input_tensors([t1, t2])
    self.assertEqual(i.tuple_shapes, [[6], [3, 18]])
    self.assertEqual(i.tuple_types, [dtypes.int32, dtypes.float32])
    i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
    self.assertEqual(i.number_of_shards, 2)
    self.assertEqual(i.tuple_shapes, [[6, 18], [12]])
    self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
    i.set_shard_dimensions([1, 0])
    i.set_number_of_shards(3)
    with self.assertRaises(ValueError):
      # presumably rejected because the shapes cannot be split evenly
      # across 4 shards along the chosen dimensions -- TODO confirm.
      i.set_number_of_shards(4)

  def testFreezing(self):
    """Tests freezing the queue."""
    i = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
    t1 = constant_op.constant(1, dtypes.int32, shape=[2])
    t2 = constant_op.constant(2.0, dtypes.float32, shape=[2, 4])
    i.set_configuration_from_sharded_input_tensors([[t2, t1], [t2, t1]])
    self.assertEqual(i.number_of_shards, 2)
    self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
    self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
    self.assertEqual(i.shard_dimensions, [0, 0])
    i.freeze()
    # After freeze(), re-setting the identical configuration is allowed...
    i.set_number_of_shards(2)
    i.set_tuple_shapes([[4, 4], [4]])
    i.set_tuple_types([dtypes.float32, dtypes.int32])
    i.set_shard_dimensions([0, 0])
    # ...but any change is rejected and the configuration is unchanged.
    with self.assertRaises(ValueError):
      i.set_number_of_shards(1)
    with self.assertRaises(ValueError):
      i.set_tuple_shapes([[8, 8], [8]])
    with self.assertRaises(ValueError):
      i.set_tuple_types([dtypes.int32, dtypes.float32])
    with self.assertRaises(ValueError):
      i.set_shard_dimensions([1, 0])
    self.assertEqual(i.number_of_shards, 2)
    self.assertEqual(i.tuple_shapes, [[4, 4], [4]])
    self.assertEqual(i.tuple_types, [dtypes.float32, dtypes.int32])
    self.assertEqual(i.shard_dimensions, [0, 0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
thomasaarholt/hyperspy | hyperspy/samfire_utils/goodness_of_fit_tests/test_general.py | 4 | 1052 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
class goodness_test(object):
    """Base class for SAMFire goodness-of-fit tests.

    Stores an acceptance threshold that is always kept non-negative.
    """

    # Acceptance threshold; None means "not configured yet".
    _tolerance = None

    @property
    def tolerance(self):
        """The current acceptance threshold (non-negative, or None)."""
        return self._tolerance

    @tolerance.setter
    def tolerance(self, value):
        # None passes through untouched; any number is stored by magnitude.
        self._tolerance = None if value is None else np.abs(value)
| gpl-3.0 |
phillipwei/crazyflie-clients-python | lib/cfclient/ui/toolboxes/__init__.py | 32 | 1998 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
List all the available toolboxes so they can be used by the UI.
Dropping a new .py file into this directory will automatically list and load
it into the UI when it is started.
"""
__author__ = 'Bitcraze AB'
__all__ = []
import os
import glob
import logging
logger = logging.getLogger(__name__)
# Discover toolboxes by filename convention: any <Name>Toolbox.py module
# placed in this directory is treated as a toolbox.
foundToolboxes = [os.path.splitext(os.path.basename(f))[0] for f in
                  glob.glob(os.path.dirname(__file__) +
                            "/[A-Za-z]*Toolbox.py")]
if len(foundToolboxes) == 0:
    # Frozen/compiled distributions may ship only .pyc files.
    foundToolboxes = [os.path.splitext(os.path.basename(f))[0] for f in
                      glob.glob(os.path.dirname(__file__) +
                                "/[A-Za-z]*Toolbox.pyc")]
logger.debug("Found toolboxes: %s", foundToolboxes)
toolboxes = []
for tb in foundToolboxes:
    # Import each module and collect the class named after the file
    # (level -1 is Python 2's implicit relative-then-absolute import).
    tbModule = __import__(tb, globals(), locals(), [tb], -1)
    toolboxes.append(getattr(tbModule, tb))
| gpl-2.0 |
juanalfonsopr/odoo | addons/account/account_analytic_line.py | 304 | 7914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class account_analytic_line(osv.osv):
    """Analytic line extended with accounting fields (product, general
    account, journal, currency) plus helpers used by the invoicing flow."""
    _inherit = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
        'product_id': fields.many2one('product.product', 'Product'),
        'general_account_id': fields.many2one('account.account', 'General Account', required=True, ondelete='restrict'),
        'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
        'code': fields.char('Code', size=8),
        'ref': fields.char('Ref.'),
        'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
        'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
    }
    _defaults = {
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
    }
    _order = 'date desc'

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Extend search() so callers may inject a date window through the
        context keys 'from_date' / 'to_date'."""
        if context is None:
            context = {}
        if context.get('from_date',False):
            args.append(['date', '>=', context['from_date']])
        if context.get('to_date',False):
            args.append(['date','<=', context['to_date']])
        return super(account_analytic_line, self).search(cr, uid, args, offset, limit,
            order, context=context, count=count)

    def _check_company(self, cr, uid, ids, context=None):
        """Return False when a line's analytic account and its linked move
        line belong to different companies."""
        lines = self.browse(cr, uid, ids, context=context)
        for l in lines:
            if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
                return False
        return True

    # Compute the cost based on the price type define into company
    # property_valuation_price_type property
    def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id,
                              unit=False, journal_id=False, context=None):
        """On-change handler: derive the amount, general account and unit
        of measure from the selected product / quantity / journal.

        Sale journals use the product list price; every other journal type
        uses the cost price and the result is negated (stored as a cost).
        """
        if context==None:
            context={}
        if not journal_id:
            # Default to the first purchase-type analytic journal.
            j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
            journal_id = j_ids and j_ids[0] or False
        if not journal_id or not prod_id:
            return {}
        product_obj = self.pool.get('product.product')
        analytic_journal_obj =self.pool.get('account.analytic.journal')
        product_price_type_obj = self.pool.get('product.price.type')
        product_uom_obj = self.pool.get('product.uom')
        j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
        prod = product_obj.browse(cr, uid, prod_id, context=context)
        result = 0.0
        if prod_id:
            # Fall back to the product's default (or purchase) UoM when the
            # supplied unit is missing or from an incompatible category.
            unit_obj = False
            if unit:
                unit_obj = product_uom_obj.browse(cr, uid, unit, context=context)
            if not unit_obj or prod.uom_id.category_id.id != unit_obj.category_id.id:
                unit = prod.uom_id.id
            if j_id.type == 'purchase':
                if not unit_obj or prod.uom_po_id.category_id.id != unit_obj.category_id.id:
                    unit = prod.uom_po_id.id
            if j_id.type <> 'sale':
                # Expense side: product account, then its category's.
                a = prod.property_account_expense.id
                if not a:
                    a = prod.categ_id.property_account_expense_categ.id
                if not a:
                    raise osv.except_osv(_('Error!'),
                                _('There is no expense account defined ' \
                                        'for this product: "%s" (id:%d).') % \
                                        (prod.name, prod.id,))
            else:
                # Income side: product account, then its category's.
                a = prod.property_account_income.id
                if not a:
                    a = prod.categ_id.property_account_income_categ.id
                if not a:
                    raise osv.except_osv(_('Error!'),
                                _('There is no income account defined ' \
                                        'for this product: "%s" (id:%d).') % \
                                        (prod.name, prod_id,))
            flag = False
            # Compute based on pricetype
            product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
            pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
            if journal_id:
                journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
                if journal.type == 'sale':
                    product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
                    if product_price_type_ids:
                        pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
            # Take the company currency as the reference one
            if pricetype.field == 'list_price':
                flag = True
            ctx = context.copy()
            if unit:
                # price_get() will respect a 'uom' in its context, in order
                # to return a default price for those units
                ctx['uom'] = unit
            amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
            prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
            amount = amount_unit * quantity or 0.0
            result = round(amount, prec)
            if not flag:
                # Costs are returned as negative amounts.
                result *= -1
            return {'value': {
                'amount': result,
                'general_account_id': a,
                'product_uom_id': unit
                }
            }

    def view_header_get(self, cr, user, view_id, view_type, context=None):
        """Show the analytic account name as the view header when an
        'account_id' is present in the context."""
        if context is None:
            context = {}
        if context.get('account_id', False):
            # account_id in context may also be pointing to an account.account.id
            cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
            res = cr.fetchone()
            if res:
                res = _('Entries: ')+ (res[0] or '')
            return res
        return False
return False
class res_partner(osv.osv):
    """ Inherits partner and adds contract information in the partner form """
    _inherit = 'res.partner'
    _columns = {
        # Read-only list of the partner's analytic accounts (contracts).
        'contract_ids': fields.one2many('account.analytic.account', \
                                                    'partner_id', 'Contracts', readonly=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
christianbaun/octopuscloud | boto/cloudfront/logging.py | 219 | 1557 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class LoggingInfo(object):
    """CloudFront distribution logging configuration parsed from XML:
    the target bucket and the log key prefix."""

    def __init__(self, bucket='', prefix=''):
        self.bucket = bucket
        self.prefix = prefix

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Known tags map onto fixed attribute names; anything else is
        # stored verbatim so unexpected fields are not lost.
        attribute = {'Bucket': 'bucket', 'Prefix': 'prefix'}.get(name, name)
        setattr(self, attribute, value)
| apache-2.0 |
ProgVal/Limnoria-test | plugins/Protector/plugin.py | 4 | 7543 | ###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
import supybot.ircdb as ircdb
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Protector')
class Protector(callbacks.Plugin):
    """Prevents users from doing things they are not supposed to do on a channel,
    even if they have +o or +h."""

    def isImmune(self, irc, msg):
        # Immune senders are never corrected: server prefixes, the bot
        # itself, and nicks listed in the per-channel 'immune' setting.
        if not ircutils.isUserHostmask(msg.prefix):
            self.log.debug('%q is immune, it\'s a server.', msg)
            return True # It's a server prefix.
        if ircutils.strEqual(msg.nick, irc.nick):
            self.log.debug('%q is immune, it\'s me.', msg)
            return True # It's the bot itself.
        if msg.nick in self.registryValue('immune', msg.args[0]):
            self.log.debug('%q is immune, it\'s configured to be immune.', msg)
            return True
        return False

    def isOp(self, irc, channel, hostmask):
        # True when the hostmask holds the #channel,op capability, or is
        # the bot itself.
        cap = ircdb.makeChannelCapability(channel, 'op')
        if ircdb.checkCapability(hostmask, cap):
            self.log.debug('%s is an op on %s, it has %s.',
                           hostmask, channel, cap)
            return True
        if ircutils.strEqual(hostmask, irc.prefix):
            return True
        return False

    def isProtected(self, irc, channel, hostmask):
        # True when the hostmask holds the #channel,protected capability,
        # or is the bot itself.
        cap = ircdb.makeChannelCapability(channel, 'protected')
        if ircdb.checkCapability(hostmask, cap):
            self.log.debug('%s is protected on %s, it has %s.',
                           hostmask, channel, cap)
            return True
        if ircutils.strEqual(hostmask, irc.prefix):
            return True
        return False

    def demote(self, irc, channel, nick):
        # Strip channel-operator status from nick.
        irc.queueMsg(ircmsgs.deop(channel, nick))

    def __call__(self, irc, msg):
        # Gatekeeper: dispatch to the do* handlers only for channel
        # messages where the plugin is enabled, the bot is opped, and the
        # sender is a non-immune channel op.
        def ignore(reason):
            self.log.debug('Ignoring %q, %s.', msg, reason)
        if not msg.args:
            ignore('no msg.args')
        elif not irc.isChannel(msg.args[0]):
            ignore('not on a channel')
        elif not self.registryValue('enable', msg.args[0]):
            ignore('supybot.plugins.Protector.enable is False.')
        elif msg.args[0] not in irc.state.channels:
            # One has to wonder how this would happen, but just in case...
            ignore('bot isn\'t in channel')
        elif irc.nick not in irc.state.channels[msg.args[0]].ops:
            ignore('bot is not opped')
        elif msg.nick not in irc.state.channels[msg.args[0]].users:
            ignore('sender is not in channel (ChanServ, maybe?)')
        elif msg.nick not in irc.state.channels[msg.args[0]].ops:
            ignore('sender is not an op in channel (IRCOP, maybe?)')
        elif self.isImmune(irc, msg):
            ignore('sender is immune')
        else:
            super(Protector, self).__call__(irc, msg)

    def doMode(self, irc, msg):
        # Revert privilege grants to users who lack the matching channel
        # capability, and restore privileges wrongly removed.
        channel = msg.args[0]
        chanOp = ircdb.makeChannelCapability(channel, 'op')
        chanVoice = ircdb.makeChannelCapability(channel, 'voice')
        chanHalfOp = ircdb.makeChannelCapability(channel, 'halfop')
        if not ircdb.checkCapability(msg.prefix, chanOp):
            # Senders without the op capability lose their +o for trying.
            irc.sendMsg(ircmsgs.deop(channel, msg.nick))
        for (mode, value) in ircutils.separateModes(msg.args[1:]):
            if not value:
                continue
            if ircutils.strEqual(value, msg.nick):
                # We allow someone to mode themselves to oblivion.
                continue
            if irc.isNick(value):
                hostmask = irc.state.nickToHostmask(value)
                if mode == '+o':
                    if not self.isOp(irc, channel, hostmask):
                        irc.queueMsg(ircmsgs.deop(channel, value))
                elif mode == '+h':
                    if not ircdb.checkCapability(hostmask, chanHalfOp):
                        irc.queueMsg(ircmsgs.dehalfop(channel, value))
                elif mode == '+v':
                    if not ircdb.checkCapability(hostmask, chanVoice):
                        irc.queueMsg(ircmsgs.devoice(channel, value))
                elif mode == '-o':
                    if ircdb.checkCapability(hostmask, chanOp):
                        irc.queueMsg(ircmsgs.op(channel, value))
                elif mode == '-h':
                    if ircdb.checkCapability(hostmask, chanOp):
                        irc.queueMsg(ircmsgs.halfop(channel, value))
                elif mode == '-v':
                    if ircdb.checkCapability(hostmask, chanOp):
                        irc.queueMsg(ircmsgs.voice(channel, value))
            else:
                assert ircutils.isUserHostmask(value)
                # Handle bans.

    def doKick(self, irc, msg):
        # Re-invite (and unban) protected users who were kicked; demote
        # kickers who lack the op capability.
        channel = msg.args[0]
        kicked = msg.args[1].split(',')
        protected = []
        for nick in kicked:
            if ircutils.strEqual(nick, irc.nick):
                return # Channel will handle the rejoin.
        for nick in kicked:
            hostmask = irc.state.nickToHostmask(nick)
            if self.isProtected(irc, channel, hostmask):
                self.log.info('%s was kicked from %s and is protected; '
                              'inviting back.', hostmask, channel)
                # NOTE(review): nickToHostmask() already appears to return a
                # full nick!user@host prefix, so prepending the nick again
                # looks doubled -- confirm against supybot's ircstate before
                # relying on the ban matching below.
                hostmask = '%s!%s' % (nick, irc.state.nickToHostmask(nick))
                protected.append(nick)
                bans = []
                for banmask in irc.state.channels[channel].bans:
                    if ircutils.hostmaskPatternEqual(banmask, hostmask):
                        bans.append(banmask)
                irc.queueMsg(ircmsgs.unbans(channel, bans))
                irc.queueMsg(ircmsgs.invite(nick, channel))
        if not self.isOp(irc, channel, msg.prefix):
            self.demote(irc, channel, msg.nick)
Class = Protector
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
figment/falloutsnip | Vendor/IronPython/Lib/modulefinder.py | 215 | 24283 | """Find modules used by a script, using introspection."""
# This module should be kept compatible with Python 2.2, see PEP 291.
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
if hasattr(sys.__stdout__, "newlines"):
    READ_MODE = "U" # universal line endings
else:
    # remain compatible with Python < 2.3
    READ_MODE = "r"

# Single-byte opcodes (as chr() values) used when scanning compiled code
# objects for import statements and global-name stores.
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra search *path* for *packagename* (honored in
    addition to any __path__ the package sets up at runtime)."""
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}

# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    """Record that package *oldname* should be reported as *newname*."""
    replacePackageMap[oldname] = newname
class Module:
    """Record of one module discovered during a ModuleFinder run."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned to in the module, including names pulled
        # in through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved, i.e. star-imports from
        # a non-Python module.
        self.starimports = {}

    def __repr__(self):
        pieces = ["Module(%r" % (self.__name__,)]
        if self.__file__ is not None:
            pieces.append(", %r" % (self.__file__,))
        if self.__path__ is not None:
            pieces.append(", %r" % (self.__path__,))
        pieces.append(")")
        return "".join(pieces)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
    def msg(self, level, str, *args):
        # Print a debug message indented by self.indent, but only when its
        # level does not exceed the configured verbosity (Python 2 prints).
        if level <= self.debug:
            for i in range(self.indent):
                print " ",
            print str,
            for arg in args:
                print repr(arg),
            print
    def msgin(self, *args):
        # Log entry into a nested call and deepen the indent one step.
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)
    def msgout(self, *args):
        # Companion to msgin(): shallow the indent one step and log exit.
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
fp = open(pathname, READ_MODE)
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
fp = open(pathname, READ_MODE)
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate ``import name`` / ``from name import fromlist`` as
        issued by *caller*; returns the top-level package unless a
        fromlist was given."""
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            # Only packages can supply the submodules named in fromlist.
            self.ensure_fromlist(m, fromlist)
        return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module relative to which *caller*'s import
        should be resolved, or None for an absolute import."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # A package counts as its own first level.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError, "relative importpath too deep"
            # Strip one trailing dotted component per remaining level.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            # Implicit relative import issued from inside a package.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # Plain module inside a package: the parent is the package.
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """Import the first dotted component of *name* and return
        (head_module, remaining_tail).

        Tries the name as relative to *parent* first; on failure retries
        it as a top-level module.  Raises ImportError if neither works.
        """
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Fall back to an absolute import of the head.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError, "No module named " + qname
    def load_tail(self, q, tail):
        """Import each remaining dotted component of *tail* under head
        module *q*; return the deepest module reached.

        Raises ImportError as soon as any component cannot be imported.
        """
        self.msgin(4, "load_tail", q, tail)
        m = q
        while tail:
            i = tail.find('.')
            if i < 0: i = len(tail)
            head, tail = tail[:i], tail[i+1:]
            mname = "%s.%s" % (m.__name__, head)
            m = self.import_module(head, mname, m)
            if not m:
                self.msgout(4, "raise ImportError: No module named", mname)
                raise ImportError, "No module named " + mname
        self.msgout(4, "load_tail ->", m)
        return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Make sure every name in *fromlist* of package *m* is loaded.

        '*' expands to all discoverable submodules (one level only:
        *recursive* guards against expanding '*' again on the expansion).
        Names that are attributes of *m* are assumed to be globals, not
        submodules, and are skipped.
        """
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError, "No module named " + subname
    def find_all_submodules(self, m):
        """Scan package *m*'s directories and return the names of all
        candidate submodules (sans '__init__'), or None if *m* is not a
        package (has no __path__)."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        for triple in imp.get_suffixes():
            suffixes.append(triple[0])
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except os.error:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    # Strip the first matching import suffix.
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        # Dict keys de-duplicate names that match several suffixes.
        return modules.keys()
    def import_module(self, partname, fqname, parent):
        """Import a single component *partname* as fully-qualified
        *fqname* under *parent*; return its Module or None.

        Results are cached in self.modules; known-bad names short-circuit
        via self.badmodules.  Returns None (rather than raising) when the
        module cannot be found.
        """
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            # Already analyzed: reuse the cached Module.
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot contain submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp: fp.close()
        if parent:
            # Mirror the runtime binding of the submodule as an attribute.
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Create the Module for *fqname* from open file *fp* and scan
        its code object for imports.

        file_info is an (suffix, mode, type) triple as produced by
        imp.find_module.  Packages recurse via load_package; C extensions
        and builtins get a Module with no code object.
        """
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            # Trailing newline keeps compile() happy on files without one.
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError, "Bad magic number in %s" % pathname
            # Skip the 4-byte source-mtime field before the marshal data.
            fp.read(4)
            co = marshal.load(fp)
        else:
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        # Failures are recorded in self.badmodules instead, both for the
        # module itself and for each fromlist name that cannot be loaded.
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError, msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    if sub in self.badmodules:
                        self._add_badmodule(sub, caller)
                        continue
                    try:
                        # Retry with one fromlist entry at a time so a
                        # single bad name doesn't hide the good ones.
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError, msg:
                        self.msg(2, "ImportError:", str(msg))
                        fullname = name + "." + sub
                        self._add_badmodule(fullname, caller)
    def scan_opcodes(self, co,
                     unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Version for Python 2.4 and older
        # Yields ("store", (name,)) for global-name stores and
        # ("import", (fromlist, name)) for LOAD_CONST+IMPORT_NAME pairs.
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        while code:
            c = code[0]
            if c in STORE_OPS:
                # '<H' = little-endian 16-bit oparg following the opcode.
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if c == LOAD_CONST and code[3] == IMPORT_NAME:
                # '<xHxH' skips both opcode bytes and reads both opargs.
                oparg_1, oparg_2 = unpack('<xHxH', code[:6])
                yield "import", (consts[oparg_1], names[oparg_2])
                code = code[6:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_opcodes_25(self, co,
                        unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Python 2.5 version (has absolute and relative imports)
        # Imports compile to LOAD_CONST(level) LOAD_CONST(fromlist)
        # IMPORT_NAME(name); the level constant selects the import kind.
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
        while code:
            c = code[0]
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            # code[:9:3] picks the three opcode bytes of the next three
            # 3-byte instructions for a cheap pattern match.
            if code[:9:3] == LOAD_LOAD_AND_IMPORT:
                oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
                level = consts[oparg_1]
                if level == -1: # normal import
                    yield "import", (consts[oparg_2], names[oparg_3])
                elif level == 0: # absolute import
                    yield "absolute_import", (consts[oparg_2], names[oparg_3])
                else: # relative import
                    yield "relative_import", (level, consts[oparg_2], names[oparg_3])
                code = code[9:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_code(self, co, m):
        """Walk code object *co* belonging to module *m*, following the
        opcode scanner's events to record global names and trigger
        imports; recurses into nested code objects (functions, classes).
        """
        code = co.co_code
        if sys.version_info >= (2, 5):
            scanner = self.scan_opcodes_25
        else:
            scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what in ("import", "absolute_import"):
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                if what == "absolute_import": level = 0
                else: level = -1
                self._safe_import_hook(name, m, fromlist, level=level)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            # Extension module: its globals are unknowable.
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # 'from . import x': resolve the package, then import
                    # the fromlist absolutely relative to it.
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
    def find_module(self, name, path, parent=None):
        """Locate *name* on *path*, honoring the exclude list.

        Returns the (fp, pathname, description) triple of imp.find_module;
        builtins return a synthetic triple with fp=None.  Raises
        ImportError for excluded names or names imp cannot find.
        """
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError, name
        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", imp.C_BUILTIN))
            # Top-level search uses the finder's configured path.
            path = self.path
        return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print
print " %-25s %s" % ("Name", "File")
print " %-25s %s" % ("----", "----")
# Print modules found
keys = self.modules.keys()
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print "P",
else:
print "m",
print "%-25s" % key, m.__file__ or ""
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print
print "Missing modules:"
for name in missing:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
# Print modules that may be missing, but then again, maybe not...
if maybe:
print
print "Submodules thay appear to be missing, but could also be",
print "global names in the parent package:"
for name in maybe:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.

        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                # Top-level name that failed to import: certainly missing.
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
    def replace_paths_in_code(self, co):
        """Return a copy of code object *co* with its co_filename rewritten
        according to the (old_prefix, new_prefix) pairs in
        self.replace_paths; recurses into nested code objects."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break

        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            # Remember this path so it is only reported once.
            self.processed_paths.append(original_filename)

        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])

        # Code objects are immutable: rebuild one with the new filename.
        return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
                         co.co_flags, co.co_code, tuple(consts), co.co_names,
                         co.co_varnames, new_filename, co.co_name,
                         co.co_firstlineno, co.co_lnotab,
                         co.co_freevars, co.co_cellvars)
def test():
    """Command-line demo driver.

    Options: -d increase debug, -q quiet, -m treat remaining args as
    module names, -p add to the search path, -x exclude a module.
    The first positional argument is the script to analyze.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error, msg:
        print msg
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print "   ", repr(item)

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            # 'pkg.*' imports the package plus all its submodules.
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    # Run the demo driver; a Ctrl-C is reported politely since the
    # report printed so far is still useful (run with -i to inspect mf).
    try:
        mf = test()
    except KeyboardInterrupt:
        print "\n[interrupt]"
| gpl-3.0 |
TrailingDots/simple_log_messaging | easy_py_messaging/dirSvc.py | 3 | 11718 | #!/bin/env python
"""
Server:
Directory services for
easy_py_messaging - easy messaging client and servers
control_messaging - Control messaging structure
"""
import sys
import os
import time
import zmq
import json
import pickle
import atexit
import logConfig
import platform
import loggingClientTask
NOISY = False # Set to True for debug/trace
def exiting(exit_msg):
print('dirSvc: exiting:' + exit_msg)
class DirEntry(object):
    """A single directory entry mapping a name (key) to its value."""
    def __init__(self, key, value):
        # key: service name; value: the port (or payload) assigned to it.
        self.key = key
        self.value = value

    def to_JSON(self):
        """Serialize this entry as a JSON object string.

        BUG FIX: json.dumps(self) raises TypeError because arbitrary
        objects are not JSON serializable; serialize the attribute dict
        instead (matching DirOperations.to_JSON's default= handler).
        """
        return json.dumps(self.__dict__)
class DirOperations(object):
"""Various operations on the directory service."""
def __init__(self, config):
# All the entries for this directory.
# Key = name of entry
# Value = a DirEntry
self.directory = {}
self.pickle_filename = config['memory_filename']
self.clear = config['clear']
# receive requests on this port.
self.dir_port = int(config['port'])
# Seconds to start persistence unless reset
self.DELTA_UPDATE_SECS = 5
# Is the in-core database dirty?
# (Does it need saving?)
self.set_clean()
self.is_dirty = False
# Time to persist dir. Seconds from epoch.
self.is_dirty_persist = self.next_persist_time()
# logCollector connection
self.client = loggingClientTask.LoggingClientClass(platform.node())
self.client.start()
log_entry = 'Starting=DirectoryService,port=%d,pid=%d,memory=%s' % \
(self.dir_port, os.getpid(), config['memory_filename'])
self.client.info(log_entry)
print log_entry
# Unless clear is set, logs from memory_filename.
self.from_pickle()
def next_persist_time(self):
self.is_dirty_persist = time.time() + \
self.DELTA_UPDATE_SECS # update time.
def to_pickle(self):
"""Persist the names in a pickle file.
This unconditionally persists the directory.
This may be due to a persist command."""
try:
pickle.dump(self.directory,
open(self.pickle_filename, 'wb'))
except Exception as err:
err_str = 'to_pickle=cannot_open,file=%s,err=%s' % \
(self.pickle_filename, str(err))
self.client.critical(err_str)
sys.stderr.write(err_str + '\n')
sys.exit(1)
self.set_clean()
self.client.debug('pickled_to=' + self.pickle_filename)
return True
def from_pickle(self):
"""Load from pickle file"""
if os.path.isfile(self.pickle_filename):
try:
self.directory = pickle.load(open(self.pickle_filename, 'rb'))
except EOFError:
pass # Ignore empty file
else:
self.client.critical('from_pickle=%s,status=not_found' %
self.pickle_filename)
sys.stderr.write('FATAL ERROR: Cannot process memory_file: "%s"\n' %
self.pickle_filename)
sys.exit(1)
self.set_clean()
self.client.debug('from_pickle=%s,status=OK' %
self.pickle_filename)
def persist_timeout_check(self):
"""On a time out, conditionally persist the dictionary
only if the directory is dirty."""
if not self.is_dirty:
return
# directory is dirty. Time to write out?
if time.time() > self.is_dirty_persist:
self.to_pickle()
def to_JSON(self):
self.client.debug('to_json=True')
return json.dumps(self.directory, default=lambda x: x.__dict__)
def add_key_val(self, key, value):
"""
add key and value to Directory.
What is the purpose of storing the entire
DirEntry object into the directory?
Because later additional data may become
associated with this object and this makes
extensions much easier. (We'll see...)
"""
if NOISY: print 'add_key_val(%s, %s)' % (str(key), str(value))
if key not in self.directory:
self.set_dirty()
dir_entry = DirEntry(key, value)
self.directory[key] = dir_entry
self.client.info('add_key_val=%s,value=%s' % (key, value))
return value
def handle_meta(self, key):
"""
Meta Queries - Queries that request info
about directory services and not about ports.
A delete request is the name prefixed by '~'.
All meta queries get prefixed with '@'
on both ends:
@PERSIST = Persist the dir immediately.
@DIR = Reply with all ports in the dir.
@CLEAR = Clears the directory
@CLEAR_DIRECTORY = Clears the directory
@MEMORY_FILENAME = Reply with the name of the memory file
@EXIT = Exit this program. Used for code coverage.
Returns: None if not a meta, else non-None.
"""
if key == '@DIR':
# Return entire directory in JSON
data = self.to_JSON()
self.client.info('@DIR=%s' % str(data))
return data
if key == '@PERSIST':
self.to_pickle()
self.client.info('@PERSIST=True')
return 'True'
if key == '@CLEAR' or key == '@CLEAR_DIRECTORY':
self.directory = {}
self.client.info('@CLEAR_DIRECTORY=True')
return True
if key == '@MEMORY_FILENAME':
self.client.info('@MEMORY_FILENAME=%s' % self.pickle_filename)
return self.pickle_filename
if key == '@EXIT':
return '@EXIT'
# All valid meta commands compared. If the key tags as meta by a
# leading '@', but is not in the above list, flag as unknown meta
# command.
if key[0] == '@':
self.client.error('name=%s,status=unknown_meta_command' % key)
return '@UNKNOWN_META_COMMAND'
return None # No meta query found.
def get_port(self, key):
"""
Get a port by name. If the name does not
exist in the directory, then increment to the
next port and return that.
A name with a prefix of '~' means delete
that name. If the name does not exist, ignore it.
Returns: port associated with name.
"""
if len(key) == 0:
return 0 # bogus port - let user handle
# Handle delete request, if requested.
if key[0] == '~':
self.del_key(key)
return True
# Handle meta query if requested.
if key[0] == '@':
return self.handle_meta(key)
port = self.directory.get(key, None)
if port is None:
port = logConfig.incDirNamePort()
port = self.add_key_val(key, port)
else:
port = port.value
self.client.info('get_port_key=%s,port=%s' % (key, port))
if NOISY: print 'get_port(%s) = %s' % (key, port)
return port
def del_key(self, key):
"""
Delete the given key.
Returns True if key in directory, else None
A delete request has a leading '~' char.
"""
key = key[1:]
if key in self.directory:
del self.directory[key]
self.set_dirty()
self.client.info('del_key=%s,status=deleted' % key)
return 'DELETED'
self.client.info('del_key=%s,status=not_found' % key)
return 'not_found'
def set_dirty(self):
"""Set a time for to automatically
persist the DB."""
self.is_dirty = True
self.is_dirty_persist = self.next_persist_time()
def set_clean(self):
"""Set clean. Then set a time
to automatically persist the DB."""
self.is_dirty = True
self.is_dirty_persist = self.next_persist_time()
def usage():
"""Print the usage blurb and exit."""
print 'dirSvc [--help] [--port] [--memory-file=memory_filename]'
print '\t\t[--clear]'
print '\t--help = This blurb'
print '\t--port=aport = Port to expect queries.'
print '\t--memory-file=memory_filename = File to persist names'
print '\t\tDefault: ./dirSvc.data'
print '\t--clear = Clear memory-file upon starting.'
print '\t\tDefault: False, do not clear but load memory-file'
print ''
sys.exit(1)
def parseOpts():
import getopt
global NOISY
try:
opts, args = getopt.gnu_getopt(
sys.argv[1:], 'cpmh',
['port=', # Port # to expect messages
'memory-file=', # Which file to persist names
'help', # Help blurb
'noisy', # Turn noise on
'clear' # If set, clean memory-file at start
]
)
except getopt.GetoptError as err:
print err
usage()
# Number leading args to shift out
shift_out = 0
config = {
'clear': False,
'memory_filename': './dirSvc.data',
'port': str(logConfig.DIR_PORT),
}
for opt, arg in opts:
if opt in ['-h', '--help']:
usage()
elif opt in ['--noisy']:
NOISY = True
continue
elif opt in ['p', '--port']:
try:
# Ensure a valid integer port
_ = int(arg)
except Exception as err:
sys.stdout.write(str(err) + '\n')
usage()
config['port'] = arg
logConfig.DIR_PORT = arg
shift_out += 1
continue
elif opt in ['m', '--memory-file']:
shift_out += 1
config['memory_filename'] = arg
continue
elif opt in ['c', '--clear']:
shift_out += 1
config['clear'] = True
continue
# pass the remaining args to the rest of the program.
for _ in range(shift_out):
del sys.argv[1]
return config
def main():
    """
    Main processing loop.
    The ZeroMQ pattern is The Lazy Pirate
    """
    config = parseOpts()

    context = zmq.Context(1)
    server = context.socket(zmq.REP)
    port = logConfig.get_directory_port()
    try:
        server.bind("tcp://*:%s" % str(port))
    except zmq.ZMQError as err:
        sys.stderr.write('ZMQError: %s\n' % err)
        sys.stderr.write('Please kill other instances of this program.\n')
        sys.stderr.write('Or: another program may be using port %s\n' %
                str(port))
        sys.exit(1)
    sys.stdout.write('dirSvc started. pid %s port %s\n' %
            (str(os.getpid()), str(port)))

    dir_ops = DirOperations(config)
    sequence = 0
    while True:
        dir_ops.persist_timeout_check()
        # Wait for a port naming request.
        # Notice this recv waits forever. This implies
        # a dirty directory will not get cleared.
        # Should a timeout change this logic?
        if NOISY: print("I: Normal receive port: %s)" % port)
        request = server.recv()
        # NOTE: 'port' is reused here to hold the reply payload
        # (a port number, a meta-command result, or '@EXIT').
        port = dir_ops.get_port(request)
        if NOISY: print("I: Normal request (%s:%s)" % (request, str(port)))
        server.send(str(port))
        if str(port) == '@EXIT':
            break
        # Every 10th request, dump the directory to stdout as JSON.
        if sequence % 10 == 0:
            json_str = dir_ops.to_JSON()
            print json_str
        sequence += 1

    # Shut down ZeroMQ sockets in an orderly manner.
    server.close()
    context.term()
if __name__ == '__main__':
    # Register the shutdown notice before entering the service loop.
    atexit.register(exiting, 'Exiting dirSvc')
    main()
| gpl-3.0 |
bharcode/Kaggle | PhotoQualityPrediction/feature_selection.py | 2 | 9825 | #!/usr/bin/env python
from numpy import *
import utilities
import classification
def generate_features(meta_data_train, y_train, meta_data_test):
    """ Generates features for classifier.

    Builds key->average-score maps from the training data, then applies
    them to both train and test rows; returns (x_train, x_test) where
    each row is text + size + geo features concatenated.
    """
    # Generate maps.
    name_score_map, desc_score_map, caption_score_map, word_score_map = \
        generate_text_score_map(meta_data_train, y_train)
    geo_score_map, lat_score_map, lon_score_map = generate_geo_score_map(
        meta_data_train, y_train)
    shape_score_map, size_score_map, width_score_map, height_score_map = \
        generate_size_score_map(meta_data_train, y_train)
    # Generate text features.
    text_features_train = generate_text_features(meta_data_train,
        name_score_map, desc_score_map, caption_score_map, word_score_map)
    text_features_test = generate_text_features(meta_data_test,
        name_score_map, desc_score_map, caption_score_map, word_score_map)
    # Generates geo features.
    geo_features_train = generate_geo_features(meta_data_train, geo_score_map,
        lat_score_map, lon_score_map)
    geo_features_test = generate_geo_features(meta_data_test, geo_score_map,
        lat_score_map, lon_score_map)
    # Generates size features
    size_features_train = generate_size_features(meta_data_train,
        shape_score_map, size_score_map, width_score_map, height_score_map)
    size_features_test = generate_size_features(meta_data_test,
        shape_score_map, size_score_map, width_score_map, height_score_map)
    # Combines all features.
    x_train = []
    for i in range(len(text_features_train)):
        x_train.append(text_features_train[i] + size_features_train[i] \
            + geo_features_train[i])
    x_test = []
    for i in range(len(text_features_test)):
        x_test.append(text_features_test[i] + size_features_test[i] \
            + geo_features_test[i])
    return (x_train, x_test)
def generate_geo_features(meta_data, geo_score_map, lat_score_map,
        lon_score_map):
    """ Generates features for geo information.

    Each row yields [geo_score, lat_score, lon_score]; unseen keys fall
    back to the corresponding map-wide average.
    """
    geo_default = get_map_avg(geo_score_map)
    lat_default = get_map_avg(lat_score_map)
    lon_default = get_map_avg(lon_score_map)
    features = []
    for row in meta_data:
        lat, lon = row[0], row[1]
        features.append([
            geo_score_map.get((lat, lon), geo_default),
            lat_score_map.get(lat, lat_default),
            lon_score_map.get(lon, lon_default),
        ])
    return features
def generate_geo_score_map(meta_data, y):
    """ Generates score map for geo information.

    Returns (geo_score_map, lat_score_map, lon_score_map): average label
    per (lat, lon) pair, per latitude, and per longitude respectively.
    """
    print 'Extracting geo features...'
    geo_score_pairs = []
    lat_score_pairs = []
    lon_score_pairs = []
    for i in range(len(y)):
        # meta_data columns 0 and 1 hold latitude and longitude.
        lat = meta_data[i][0]
        lon = meta_data[i][1]
        geo = (lat, lon)
        geo_score_pairs.append((geo, y[i]))
        lat_score_pairs.append((lat, y[i]))
        lon_score_pairs.append((lon, y[i]))
    geo_score_map = create_key_avg_map(geo_score_pairs)
    lat_score_map = create_key_avg_map(lat_score_pairs)
    lon_score_map = create_key_avg_map(lon_score_pairs)
    return (geo_score_map, lat_score_map, lon_score_map)
def generate_size_features(meta_data, shape_score_map, size_score_map,
        width_score_map, height_score_map):
    """ Generates features for shape, size.

    Each row yields [shape_score, size_score, width_score, height_score];
    unseen keys fall back to the corresponding map-wide average.
    """
    shape_default = get_map_avg(shape_score_map)
    size_default = get_map_avg(size_score_map)
    width_default = get_map_avg(width_score_map)
    height_default = get_map_avg(height_score_map)
    features = []
    for row in meta_data:
        width, height, size = row[2], row[3], row[4]
        features.append([
            shape_score_map.get((width, height), shape_default),
            size_score_map.get(size, size_default),
            width_score_map.get(width, width_default),
            height_score_map.get(height, height_default),
        ])
    return features
def generate_size_score_map(meta_data, y):
    """ Generates score map for width, heigth, size.

    Returns (shape_score_map, size_score_map, width_score_map,
    height_score_map): average label per (width, height) pair, per
    size, per width and per height.
    """
    print 'Extracting size features...'
    shape_score_pairs = []
    size_score_pairs = []
    width_score_pairs = []
    height_score_pairs = []
    for i in range(len(y)):
        # meta_data columns 2, 3, 4 hold width, height and size.
        width = meta_data[i][2]
        height = meta_data[i][3]
        shape = (width, height)
        size = meta_data[i][4]
        shape_score_pairs.append((shape, y[i]))
        size_score_pairs.append((size, y[i]))
        width_score_pairs.append((width, y[i]))
        height_score_pairs.append((height, y[i]))
    shape_score_map = create_key_avg_map(shape_score_pairs)
    size_score_map = create_key_avg_map(size_score_pairs)
    width_score_map = create_key_avg_map(width_score_pairs)
    height_score_map = create_key_avg_map(height_score_pairs)
    return (shape_score_map, size_score_map, width_score_map, height_score_map)
def generate_text_features(meta_data, name_score_map, desc_score_map,
        caption_score_map, word_score_map):
    """ Generates features from name, desc, caption.

    For every row, each word is scored from its field-specific map,
    falling back to the all-fields word map, then to the field average.
    Produces per-row averages, lengths and spreads of those scores.
    """
    avg_name_score = get_map_avg(name_score_map)
    avg_desc_score = get_map_avg(desc_score_map)
    avg_caption_score = get_map_avg(caption_score_map)
    text_score_features = []
    for i in range(len(meta_data)):
        # meta_data columns 5, 6, 7 hold name, description, caption text.
        name = meta_data[i][5].split(' ')
        desc = meta_data[i][6].split(' ')
        caption = meta_data[i][7].split(' ')
        name_scores = []
        for s in name:
            if s in name_score_map:
                name_scores.append(name_score_map[s])
            elif s in word_score_map:
                name_scores.append(word_score_map[s])
            else:
                name_scores.append(avg_name_score)
        desc_scores = []
        for s in desc:
            if s in desc_score_map:
                desc_scores.append(desc_score_map[s])
            elif s in word_score_map:
                desc_scores.append(word_score_map[s])
            else:
                desc_scores.append(avg_desc_score)
        caption_scores = []
        for s in caption:
            if s in caption_score_map:
                caption_scores.append(caption_score_map[s])
            elif s in word_score_map:
                caption_scores.append(word_score_map[s])
            else:
                caption_scores.append(avg_caption_score)
        # Generates features.
        name_avg_score = float(sum(name_scores)) / len(name_scores)
        desc_avg_score = float(sum(desc_scores)) / len(desc_scores)
        caption_avg_score = float(sum(caption_scores)) / len(caption_scores)
        all_scores = name_scores + desc_scores + caption_scores
        total_avg_score = float(sum(all_scores)) / len(all_scores)
        name_std = std(name_scores, name_avg_score)
        desc_std = std(desc_scores, desc_avg_score)
        caption_std = std(caption_scores, caption_avg_score)
        total_std = std(all_scores, total_avg_score)
        # ''.split(' ') yields [''], so guard empty fields to length 0.
        name_len = 0
        if name[0] != '':
            name_len = len(name)
        desc_len = 0
        if desc[0] != '':
            desc_len = len(desc)
        caption_len = 0
        if caption[0] != '':
            caption_len = len(caption)
        text_score_features.append([name_avg_score, desc_avg_score,
            caption_avg_score, total_avg_score, name_len, desc_len,
            caption_len, name_std, desc_std, caption_std, total_std])
    return text_score_features
def generate_text_score_map(meta_data, y):
    """ Generates the text score map for text features.

    Returns (name_score_map, desc_score_map, caption_score_map,
    word_score_map): average label per word within each field, plus a
    combined map over all three fields.
    """
    print 'Extracting text features...'
    name_y_pairs = []
    desc_y_pairs = []
    caption_y_pairs = []
    for i in range(len(y)):
        # meta_data columns 5, 6, 7 hold name, description, caption text.
        name = meta_data[i][5].split(' ')
        desc = meta_data[i][6].split(' ')
        caption = meta_data[i][7].split(' ')
        for s in name:
            name_y_pairs.append((s, y[i]))
        for s in desc:
            desc_y_pairs.append((s, y[i]))
        for s in caption:
            caption_y_pairs.append((s, y[i]))
    word_y_pairs = name_y_pairs + desc_y_pairs + caption_y_pairs
    name_score_map = create_key_avg_map(name_y_pairs)
    desc_score_map = create_key_avg_map(desc_y_pairs)
    caption_score_map = create_key_avg_map(caption_y_pairs)
    word_score_map = create_key_avg_map(word_y_pairs)
    return (name_score_map, desc_score_map, caption_score_map, word_score_map)
def std(iterable, avg):
    """ Calculate the population standard deviation around *avg*.

    Returns 0.0 for an empty iterable.  BUG FIX: the previous version
    returned sqrt(sum of squared deviations) without dividing by N, so
    the "spread" grew with sample size and was not comparable across
    texts of different lengths; it also depended on a `math` name that
    `from numpy import *` does not reliably provide.
    """
    values = list(iterable)
    if not values:
        return 0.0
    variance = sum((x - avg) ** 2 for x in values) / float(len(values))
    return variance ** 0.5
def get_map_avg(k_v_map):
    """ Calculates the average value of a map.

    Returns 0.0 for an empty map (BUG FIX: previously raised
    ZeroDivisionError), so callers can use it as a safe fallback score.
    """
    if not k_v_map:
        return 0.0
    total = 0.0
    for value in k_v_map.values():
        total += value
    return total / len(k_v_map)
def create_key_avg_map(k_v_pairs):
    """ Creates a map which maps a key to its average value.

    Input is an iterable of (key, value) pairs; duplicate keys have
    their values averaged.
    """
    totals = {}
    counts = {}
    for key, value in k_v_pairs:
        totals[key] = totals.get(key, 0) + value
        counts[key] = counts.get(key, 0) + 1
    averages = {}
    for key in totals:
        averages[key] = float(totals[key]) / counts[key]
    return averages
if __name__ == '__main__':
    # Import-only module: feature generation is driven by the caller.
    pass
| gpl-2.0 |
Bachaco-ve/odoo | addons/account/installer.py | 381 | 8404 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil.relativedelta import relativedelta
import logging
from operator import itemgetter
import time
import urllib2
import urlparse
try:
import simplejson as json
except ImportError:
import json # noqa
from openerp.release import serie
from openerp.tools.translate import _
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class account_installer(osv.osv_memory):
    """Configuration wizard that installs a localized chart of accounts
    and creates the first fiscal year (and its periods) for a company.
    """
    _name = 'account.installer'
    _inherit = 'res.config.installer'

    def _get_charts(self, cr, uid, context=None):
        """Return selectable chart-of-accounts modules as a sorted list of
        (module_name, label) tuples, with the 'Custom' entry always first.
        """
        modules = self.pool.get('ir.module.module')
        # Try to fetch the chart list from the OpenERP apps server first;
        # fall back silently to an empty dict when offline/unreachable.
        try:
            apps_server = self.pool.get('ir.module.module').get_apps_server(cr, uid, context=context)
            up = urlparse.urlparse(apps_server)
            url = '{0.scheme}://{0.netloc}/apps/charts?serie={1}'.format(up, serie)
            j = urllib2.urlopen(url, timeout=3).read()
            apps_charts = json.loads(j)
            charts = dict(apps_charts)
        except Exception:
            charts = dict()
        # Merge in locally installed modules from the 'Account Charts'
        # category; local descriptions override any server entries.
        category_name, category_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'module_category_localization_account_charts')
        ids = modules.search(cr, uid, [('category_id', '=', category_id)], context=context)
        if ids:
            charts.update((m.name, m.shortdesc) for m in modules.browse(cr, uid, ids, context=context))
        # Sort by human-readable label, then force 'Custom' to the top.
        charts = sorted(charts.items(), key=itemgetter(1))
        charts.insert(0, ('configurable', _('Custom')))
        return charts

    _columns = {
        # Accounting
        'charts': fields.selection(_get_charts, 'Accounting Package',
            required=True,
            help="Installs localized accounting charts to match as closely as "
            "possible the accounting needs of your company based on your "
            "country."),
        'date_start': fields.date('Start Date', required=True),
        'date_stop': fields.date('End Date', required=True),
        'period': fields.selection([('month', 'Monthly'), ('3months', '3 Monthly')], 'Periods', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'has_default_company': fields.boolean('Has Default Company', readonly=True),
    }

    def _default_company(self, cr, uid, context=None):
        """Default to the current user's company (False when unset)."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return user.company_id and user.company_id.id or False

    def _default_has_default_company(self, cr, uid, context=None):
        """True when exactly one company exists in the database."""
        count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
        return bool(count == 1)

    _defaults = {
        # Proposed fiscal year spans the current calendar year.
        'date_start': lambda *a: time.strftime('%Y-01-01'),
        'date_stop': lambda *a: time.strftime('%Y-12-31'),
        'period': 'month',
        'company_id': _default_company,
        'has_default_company': _default_has_default_company,
        'charts': 'configurable'
    }

    def get_unconfigured_cmp(self, cr, uid, context=None):
        """ get the list of companies that have not been configured yet
        but don't care about the demo chart of accounts """
        company_ids = self.pool.get('res.company').search(cr, uid, [], context=context)
        # A company counts as configured when it owns at least one root
        # account (excluding the chart installed for automated tests).
        cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
        configured_cmp = [r[0] for r in cr.fetchall()]
        return list(set(company_ids)-set(configured_cmp))

    def check_unconfigured_cmp(self, cr, uid, context=None):
        """ check if there are still unconfigured companies """
        if not self.get_unconfigured_cmp(cr, uid, context=context):
            raise osv.except_osv(_('No Unconfigured Company!'), _("There is currently no company without chart of account. The wizard will therefore not be executed."))

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Restrict the company selector to companies that still lack a
        chart of accounts."""
        if context is None: context = {}
        res = super(account_installer, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
        cmp_select = []
        # display in the widget selection only the companies that haven't been configured yet
        unconfigured_cmp = self.get_unconfigured_cmp(cr, uid, context=context)
        for field in res['fields']:
            if field == 'company_id':
                res['fields'][field]['domain'] = [('id', 'in', unconfigured_cmp)]
                res['fields'][field]['selection'] = [('', '')]
                if unconfigured_cmp:
                    cmp_select = [(line.id, line.name) for line in self.pool.get('res.company').browse(cr, uid, unconfigured_cmp)]
                    res['fields'][field]['selection'] = cmp_select
        return res

    def on_change_start_date(self, cr, uid, id, start_date=False):
        """Propose an end date exactly one year (minus one day) after the
        chosen start date."""
        if start_date:
            start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
            end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
            return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
        return {}

    def execute(self, cr, uid, ids, context=None):
        # Create the fiscal year before running the standard installer
        # chain (chart module installation, etc.).
        self.execute_simple(cr, uid, ids, context)
        return super(account_installer, self).execute(cr, uid, ids, context=context)

    def execute_simple(self, cr, uid, ids, context=None):
        """Create a fiscal year (and its monthly or quarterly periods) for
        each wizard record unless one already covers the requested range."""
        if context is None:
            context = {}
        fy_obj = self.pool.get('account.fiscalyear')
        for res in self.read(cr, uid, ids, context=context):
            if 'date_start' in res and 'date_stop' in res:
                f_ids = fy_obj.search(cr, uid, [('date_start', '<=', res['date_start']), ('date_stop', '>=', res['date_stop']), ('company_id', '=', res['company_id'][0])], context=context)
                if not f_ids:
                    # Name/code from the covered year(s), e.g. "2013" or
                    # "2013-2014" when the range crosses a year boundary.
                    name = code = res['date_start'][:4]
                    if int(name) != int(res['date_stop'][:4]):
                        name = res['date_start'][:4] + '-' + res['date_stop'][:4]
                        code = res['date_start'][2:4] + '-' + res['date_stop'][2:4]
                    vals = {
                        'name': name,
                        'code': code,
                        'date_start': res['date_start'],
                        'date_stop': res['date_stop'],
                        'company_id': res['company_id'][0]
                    }
                    fiscal_id = fy_obj.create(cr, uid, vals, context=context)
                    if res['period'] == 'month':
                        fy_obj.create_period(cr, uid, [fiscal_id])
                    elif res['period'] == '3months':
                        fy_obj.create_period3(cr, uid, [fiscal_id])

    def modules_to_install(self, cr, uid, ids, context=None):
        """Add the selected chart module to the set the installer will
        install; 'configurable' is a pseudo-chart with no module."""
        modules = super(account_installer, self).modules_to_install(
            cr, uid, ids, context=context)
        chart = self.read(cr, uid, ids, ['charts'],
                          context=context)[0]['charts']
        _logger.debug('Installing chart of accounts %s', chart)
        return (modules | set([chart])) - set(['has_default_company', 'configurable'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jplusplus/detective.io | app/detective/migrations/0011_load_myfixture.py | 3 | 13400 | # -*- coding: utf-8 -*-
from south.v2 import DataMigration
class Migration(DataMigration):

    def forwards(self, orm):
        """Create the initial set of search terms for topic id 2."""
        # (name, subject, label) triples for every search term to create.
        # Every fixture shared the same model ("detective.searchterm"),
        # topic (id 2) and a null pk, so only these three fields vary.
        # Order -- including the intentional duplicates -- is preserved
        # from the original hand-written fixture list.
        term_triples = [
            ("personal_payer", "FundraisingRound", "was financed by"),
            ("payer", "FundraisingRound", "was financed by"),
            ("educated_in", "Person", "was educated in"),
            ("based_in", "Person", "is based in"),
            ("activity_in_organization", "Person", "has activity in"),
            ("previous_activity_in_organization", "Person", "had previous activity in"),
            ("price", "EnergyProduct", "is sold at"),
            ("author", "Commentary", "was written by"),
            ("distribution", "EnergyProduct", "is distributed in"),
            ("operator", "EnergyProduct", "is operated by"),
            ("price", "EnergyProduct", "is sold at"),
            ("adviser", "Organization", "is advised by"),
            ("key_person", "Organization", "is staffed by"),
            ("partner", "Organization", "has a partnership with"),
            ("fundraising_round", "Organization", "was financed by"),
            ("monitoring_body", "Organization", "is monitored by"),
            ("litigation_against", "Organization", "has a litigation against"),
            ("revenue", "Organization", "has revenue of"),
            ("board_member", "Organization", "has board of directors with"),
            ("commentary", "EnergyProject", "is analyzed by"),
            ("owner", "EnergyProject", "is owned by"),
            ("partner", "EnergyProject", "has a partnership with"),
            ("activity_in_country", "EnergyProject", "has activity in"),
            ("activity_in_country", "Distribution", "has activity in"),
            ("product", "EnergyProject", "has product of"),
            ("commentary", "EnergyProject", "is analyzed by"),
            ("owner", "EnergyProject", "is owned by"),
            ("partner", "EnergyProject", "has partnership with"),
            ("activity_in_country", "EnergyProject", "has activity in"),
        ]
        for name, subject, label in term_triples:
            # Topic is resolved per row, matching the original fixture loop.
            orm["detective.searchterm"](
                topic=orm["detective.topic"].objects.get(id=2),
                name=name,
                subject=subject,
                label=label,
            ).save()

    def backwards(self, orm):
        "Write your backwards methods here."

    no_dry_run = True

    models = {
        u'detective.article': {
            'Meta': {'object_name': 'Article'},
            'content': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        },
        u'detective.quoterequest': {
            'Meta': {'object_name': 'QuoteRequest'},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.TextField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
            'employer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'records': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'detective.searchterm': {
            'Meta': {'object_name': 'SearchTerm'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'subject': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        },
        u'detective.topic': {
            'Meta': {'object_name': 'Topic'},
            'about': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'module': ('django.db.models.fields.SlugField', [], {'max_length': '250', 'blank': 'True'}),
            'ontology': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        }
    }

    complete_apps = ['detective']
    symmetrical = True
| lgpl-3.0 |
llhe/tensorflow | tensorflow/contrib/data/python/util/nest.py | 6 | 18292 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It adds support for dictionaries as a level of nesting in nested structures.
2. It removes support for lists as a level of nesting in nested structures.
The motivation for this change is twofold:
1. Many input-processing functions (e.g. `tf.parse_example()`) return
dictionaries, and we would like to support them natively in datasets.
2. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python.util.all_util import remove_undocumented
def _sequence_like(instance, args):
  """Packs `args` into a sequence of the same type as `instance`.

  Args:
    instance: an instance of `tuple`, `dict`, or a `namedtuple` class.
    args: elements to be converted to the type of `instance`.

  Returns:
    `args` with the type of `instance`.
  """
  if isinstance(instance, dict):
    # Keys are visited in sorted order throughout this module, so pairing
    # sorted keys with `args` reproduces the flattening order.
    return dict(zip(sorted(instance.keys()), args))
  fields = getattr(instance, "_fields", None)
  if (isinstance(instance, tuple) and
      isinstance(fields, _collections.Sequence) and
      all(isinstance(f, _six.string_types) for f in fields)):
    # namedtuple: fields are passed positionally to the constructor.
    return type(instance)(*args)
  # Plain tuple (or other sequence type): constructor takes an iterable.
  return type(instance)(args)
def _elements_of(nest):
  """Returns the child elements of `nest`: sorted-key values for a dict,
  otherwise `nest` itself."""
  if not isinstance(nest, dict):
    return nest
  # Sorted-key traversal keeps dict flattening deterministic.
  return [value for _, value in sorted(nest.items())]
def _yield_flat_nest(nest):
  """Yields the leaves of `nest` in deterministic (sorted-key) order."""
  for element in _elements_of(nest):
    if not is_sequence(element):
      yield element
    else:
      # Recurse into nested sequences/dicts.
      for leaf in _yield_flat_nest(element):
        yield leaf
def is_sequence(seq):
  """Returns True if `seq` is a Sequence or dict (except strings/lists).

  NOTE(mrry): This differs from `tensorflow.python.util.nest.is_sequence()`,
  which *does* treat a Python list as a sequence. For ergonomic
  reasons, `tf.contrib.data` users would prefer to treat lists as
  implict `tf.Tensor` objects, and dicts as (nested) sequences.

  Args:
    seq: an input sequence.

  Returns:
    True if the sequence is a not a string or list and is a
    collections.Sequence.
  """
  # Strings and lists are explicitly excluded from "sequence" here.
  if isinstance(seq, list) or isinstance(seq, _six.string_types):
    return False
  return isinstance(seq, (_collections.Sequence, dict))
def flatten(nest):
  """Returns a flat list containing the leaves of `nest`.

  If `nest` is not a sequence, this returns the single-element list `[nest]`.

  Args:
    nest: an arbitrarily nested structure or a scalar object.
      Note, numpy arrays are considered scalars.

  Returns:
    A Python list, the flattened version of the input.
  """
  if not is_sequence(nest):
    return [nest]
  return list(_yield_flat_nest(nest))
def _recursive_assert_same_structure(nest1, nest2, check_types):
  """Recursively verifies that `nest1` and `nest2` nest identically."""
  nest1_is_sequence = is_sequence(nest1)
  nest2_is_sequence = is_sequence(nest2)
  if nest1_is_sequence != nest2_is_sequence:
    raise ValueError(
        "The two structures don't have the same nested structure. "
        "First structure: %s, second structure: %s." % (nest1, nest2))

  if not nest1_is_sequence:
    # Both are leaves: nothing further to compare at this level.
    return

  if check_types:
    type_nest1 = type(nest1)
    type_nest2 = type(nest2)
    if type_nest1 != type_nest2:
      raise TypeError(
          "The two structures don't have the same sequence type. First "
          "structure has type %s, while second structure has type %s."
          % (type_nest1, type_nest2))

  for n1, n2 in zip(_elements_of(nest1), _elements_of(nest2)):
    _recursive_assert_same_structure(n1, n2, check_types)
def assert_same_structure(nest1, nest2, check_types=True):
  """Asserts that two structures are nested in the same way.

  Args:
    nest1: an arbitrarily nested structure.
    nest2: an arbitrarily nested structure.
    check_types: if `True` (default) types of sequences are checked as
      well. If set to `False`, for example a list and a tuple of objects will
      look same if they have the same size.

  Raises:
    ValueError: If the two structures do not have the same number of elements
      or if the two structures are not nested in the same way.
    TypeError: If the two structures differ in the type of sequence in any of
      their substructures. Only possible if `check_types` is `True`.
  """
  def _count_leaves(nest):
    # A non-sequence counts as a single leaf.
    return len(flatten(nest)) if is_sequence(nest) else 1

  if _count_leaves(nest1) != _count_leaves(nest2):
    raise ValueError("The two structures don't have the same number of "
                     "elements. First structure: %s, second structure: %s."
                     % (nest1, nest2))
  _recursive_assert_same_structure(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
  """Helper function for pack_sequence_as.

  Args:
    structure: Substructure (tuple/dict of elements and/or tuples/dicts) to
      mimic.
    flat: Flattened values to output substructure for.
    index: Index at which to start reading from flat.

  Returns:
    The tuple (new_index, packed), where:
      * new_index - the updated index into `flat` having processed
        `structure`.
      * packed - the subset of `flat` corresponding to `structure`,
        having started at `index`, and packed into the same nested
        format.

  Raises:
    ValueError: if `structure` contains more elements than `flat`
      (assuming indexing starts from `index`).
  """
  packed = []
  # BUG FIX: iterate the *values* of `structure` via `_elements_of` rather
  # than `structure` itself. Iterating a dict directly yields its keys
  # (plain strings, never sequences), so nested dict substructures were
  # never recursed into and `pack_sequence_as` produced wrongly packed
  # results for structures like {'a': {'b': 0}}. `_elements_of` also
  # matches the sorted-key order used by `flatten` and `_sequence_like`.
  for s in _elements_of(structure):
    if is_sequence(s):
      new_index, child = _packed_nest_with_indices(s, flat, index)
      packed.append(_sequence_like(s, child))
      index = new_index
    else:
      packed.append(flat[index])
      index += 1
  return index, packed
def pack_sequence_as(structure, flat_sequence):
  """Packs the elements of `flat_sequence` into the shape of `structure`.

  If `structure` is a scalar, `flat_sequence` must be a single-element list;
  in this case the return value is `flat_sequence[0]`.

  Args:
    structure: tuple or list constructed of scalars and/or other tuples/lists,
      or a scalar. Note: numpy arrays are considered scalars.
    flat_sequence: flat sequence to pack.

  Returns:
    packed: `flat_sequence` converted to have the same recursive structure as
      `structure`.

  Raises:
    ValueError: If nest and structure have different element counts.
  """
  flat_is_acceptable = (is_sequence(flat_sequence) or
                        isinstance(flat_sequence, list))
  if not flat_is_acceptable:
    raise TypeError("flat_sequence must be a sequence")

  if not is_sequence(structure):
    # Scalar target: the flat sequence must hold exactly one value.
    if len(flat_sequence) != 1:
      raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
                       % len(flat_sequence))
    return flat_sequence[0]

  expected_leaves = flatten(structure)
  if len(expected_leaves) != len(flat_sequence):
    raise ValueError(
        "Could not pack sequence. Structure had %d elements, but flat_sequence "
        "had %d elements. Structure: %s, flat_sequence: %s."
        % (len(expected_leaves), len(flat_sequence), structure, flat_sequence))

  _, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
  return _sequence_like(structure, packed)
def map_structure(func, *structure, **check_types_dict):
  """Applies `func` leaf-wise across identically nested structures.

  Evaluates `func(x[0], x[1], ...)` for every tuple of corresponding leaves,
  where `x[i]` is a leaf of `structure[i]`, and repacks the results into the
  structure of `structure[0]`.

  Args:
    func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or tuple or list of constructed scalars and/or other
      tuples/lists, or scalars. Note: numpy arrays are considered scalars.
    **check_types_dict: only valid keyword argument is `check_types`. If set
      to `True` (default) the types of iterables within the structures have
      to be the same; set to `False` to allow e.g. a list and a tuple of the
      same size.

  Returns:
    A new structure with the same arity as `structure[0]`, whose leaves are
    `func` applied to the corresponding leaves of all structures.

  Raises:
    TypeError: If `func` is not callable or if the structures do not match
      each other by depth tree.
    ValueError: If no structure is provided, if the structures do not match
      each other by type, or if wrong keyword arguments are provided.
  """
  if not callable(func):
    raise TypeError("func must be callable, got: %s" % func)
  if not structure:
    raise ValueError("Must provide at least one structure")

  check_types = True
  if check_types_dict:
    # The only keyword accepted is `check_types`.
    if "check_types" not in check_types_dict or len(check_types_dict) > 1:
      raise ValueError("Only valid keyword argument is check_types")
    check_types = check_types_dict["check_types"]

  first = structure[0]
  for other in structure[1:]:
    assert_same_structure(first, other, check_types=check_types)

  flattened = [flatten(s) for s in structure]
  mapped = [func(*leaves) for leaves in zip(*flattened)]
  return pack_sequence_as(first, mapped)
def _yield_flat_up_to(shallow_tree, input_tree):
  """Yields elements of `input_tree` partially flattened up to `shallow_tree`."""
  if not is_sequence(shallow_tree):
    yield input_tree
  else:
    # NOTE(review): zipping two dicts pairs their *keys*, not their values;
    # dict-valued shallow trees may not recurse as expected -- confirm.
    for shallow_branch, input_branch in zip(shallow_tree, input_tree):
      for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
        yield input_leaf
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
  """Asserts that `input_tree` can be formed by deepening `shallow_tree`.

  Every sequence node of `shallow_tree` must correspond to a sequence node of
  `input_tree` with the same length (and, when `check_types` is True, the
  same type); leaves of `shallow_tree` may correspond to arbitrarily deep
  subtrees of `input_tree`.

  For example, `shallow_tree = ["a", "b"]` is a valid shallow structure of
  `input_tree = ["c", ["d", "e"]]`, but not of
  `input_tree = ["c", ["d", "e"], "f"]` (length mismatch).

  Args:
    shallow_tree: an arbitrarily nested structure.
    input_tree: an arbitrarily nested structure.
    check_types: if `True` (default) the sequence types of `shallow_tree` and
      `input_tree` have to be the same.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`. Only raised if `check_types` is `True`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.
  """
  if not is_sequence(shallow_tree):
    # A leaf in the shallow tree matches any subtree of the input.
    return
  if not is_sequence(input_tree):
    raise TypeError(
        "If shallow structure is a sequence, input must also be a sequence. "
        "Input has type: %s." % type(input_tree))

  if check_types and not isinstance(input_tree, type(shallow_tree)):
    raise TypeError(
        "The two structures don't have the same sequence type. Input "
        "structure has type %s, while shallow structure has type %s."
        % (type(input_tree), type(shallow_tree)))

  if len(input_tree) != len(shallow_tree):
    raise ValueError(
        "The two structures don't have the same sequence length. Input "
        "structure has length %s, while shallow structure has length %s."
        % (len(input_tree), len(shallow_tree)))

  # NOTE(review): zipping dicts iterates keys, not values; dict shallow
  # trees may not recurse as expected here -- confirm intended semantics.
  for shallow_branch, input_branch in zip(shallow_tree, input_tree):
    assert_shallow_structure(shallow_branch, input_branch,
                             check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
  """Flattens `input_tree` only as deep as the structure of `shallow_tree`.

  Leaves of `shallow_tree` mark where flattening stops; any deeper structure
  in `input_tree` is retained whole as elements of the output list. If
  neither argument is a sequence this returns the single-element list
  `[input_tree]`.

  Examples:

  ```python
  input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
  shallow_tree = [[True, True], [False, True]]
  flatten_up_to(shallow_tree, input_tree)
  # -> [[2, 2], [3, 3], [4, 9], [5, 5]]

  flatten_up_to(0, 0)             # -> [0]
  flatten_up_to(0, [0, 1, 2])     # -> [[0, 1, 2]]
  flatten_up_to([0, 1, 2], 0)     # -> TypeError
  flatten_up_to([0, 1, 2], [0, 1, 2])  # -> [0, 1, 2]
  ```

  Args:
    shallow_tree: a possibly pruned structure of input_tree.
    input_tree: an arbitrarily nested structure or a scalar object.
      Note, numpy arrays are considered scalars.

  Returns:
    A Python list, the partially flattened version of `input_tree` according
    to the structure of `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.
  """
  # Validate compatibility first so errors surface before any iteration.
  assert_shallow_structure(shallow_tree, input_tree)
  partially_flat = _yield_flat_up_to(shallow_tree, input_tree)
  return list(partially_flat)
def map_structure_up_to(shallow_tree, func, *inputs):
  """Applies `func` to `inputs` flattened only down to `shallow_tree`.

  Each input is partially flattened up to `shallow_tree` (deeper structure is
  passed to `func` whole), `func` is applied elementwise across the partially
  flattened inputs, and the results are repacked into the structure of
  `shallow_tree`.

  Examples:

  ```python
  ab_tuple = collections.namedtuple("ab_tuple", "a, b")
  op_tuple = collections.namedtuple("op_tuple", "add, mul")
  inp_val = ab_tuple(a=2, b=3)
  inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
  map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
                      inp_val, inp_ops)
  # -> ab_tuple(a=6, b=15)
  ```

  Args:
    shallow_tree: a shallow tree, common to all the inputs.
    func: callable which will be applied to each input individually.
    *inputs: arbitrarily nested combination of objects that are compatible
      with shallow_tree. The function `func` is applied to corresponding
      partially flattened elements of each input, so the function must
      support arity of `len(inputs)`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.

  Returns:
    result of repeatedly applying `func`, with same structure as
    `shallow_tree`.
  """
  if not inputs:
    raise ValueError("Cannot map over no sequences")
  for input_tree in inputs:
    assert_shallow_structure(shallow_tree, input_tree)

  # Partially flatten each input, apply `func` across corresponding
  # elements, then repack using the structure of `shallow_tree`.
  partially_flattened = [flatten_up_to(shallow_tree, input_tree)
                         for input_tree in inputs]
  results = [func(*args) for args in zip(*partially_flattened)]
  return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
# Public API of this module; `remove_undocumented` strips every other
# top-level name from the module namespace.
_allowed_symbols = [
    "assert_same_structure",
    "is_sequence",
    "flatten",
    "pack_sequence_as",
    "map_structure",
    "assert_shallow_structure",
    "flatten_up_to",
    "map_structure_up_to",
]

remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
LoHChina/nova | nova/api/openstack/compute/contrib/security_group_default_rules.py | 18 | 5867 | # Copyright 2013 Metacloud Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import webob
from webob import exc
from nova.api.openstack.compute.contrib import security_groups as sg
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
authorize = extensions.extension_authorizer('compute',
'security_group_default_rules')
class SecurityGroupDefaultRulesController(sg.SecurityGroupControllerBase):
    def __init__(self):
        # Resolve the configured security-group backend driver
        # (nova-network or neutron) once per controller instance.
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
def create(self, req, body):
context = sg._authorize_context(req)
authorize(context)
# NOTE(shaohe-feng): back-compatible with db layer hard-code
# admin permission checks.
nova_context.require_admin_context(context)
sg_rule = self._from_body(body, 'security_group_default_rule')
try:
values = self._rule_args_to_dict(to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=six.text_type(exp))
if values is None:
msg = _('Not enough parameters to build a valid rule.')
raise exc.HTTPBadRequest(explanation=msg)
if self.security_group_api.default_rule_exists(context, values):
msg = _('This default rule already exists.')
raise exc.HTTPConflict(explanation=msg)
security_group_rule = self.security_group_api.add_default_rules(
context, [values])[0]
fmt_rule = self._format_security_group_default_rule(
security_group_rule)
return {'security_group_default_rule': fmt_rule}
def _rule_args_to_dict(self, to_port=None, from_port=None,
ip_protocol=None, cidr=None):
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def show(self, req, id):
context = sg._authorize_context(req)
authorize(context)
id = self.security_group_api.validate_id(id)
try:
rule = self.security_group_api.get_default_rule(context, id)
except exception.SecurityGroupDefaultRuleNotFound:
msg = _("security group default rule not found")
raise exc.HTTPNotFound(explanation=msg)
fmt_rule = self._format_security_group_default_rule(rule)
return {"security_group_default_rule": fmt_rule}
def delete(self, req, id):
context = sg._authorize_context(req)
authorize(context)
# NOTE(shaohe-feng): back-compatible with db layer hard-code
# admin permission checks.
nova_context.require_admin_context(context)
try:
id = self.security_group_api.validate_id(id)
except exception.Invalid as ex:
raise exc.HTTPBadRequest(explanation=ex.format_message())
try:
rule = self.security_group_api.get_default_rule(context, id)
self.security_group_api.remove_default_rules(context, [rule['id']])
except exception.SecurityGroupDefaultRuleNotFound as ex:
raise exc.HTTPNotFound(explanation=ex.format_message())
return webob.Response(status_int=204)
def index(self, req):
context = sg._authorize_context(req)
authorize(context)
ret = {'security_group_default_rules': []}
try:
for rule in self.security_group_api.get_all_default_rules(context):
rule_fmt = self._format_security_group_default_rule(rule)
ret['security_group_default_rules'].append(rule_fmt)
except exception.SecurityGroupDefaultRuleNotFound as ex:
raise exc.HTTPNotFound(explanation=ex.format_message())
return ret
def _format_security_group_default_rule(self, rule):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['ip_range'] = {}
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
class Security_group_default_rules(extensions.ExtensionDescriptor):
    """Default rules for security group support."""
    # Extension metadata consumed by the nova API extension framework.
    name = "SecurityGroupDefaultRules"
    alias = "os-security-group-default-rules"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "securitygroupdefaultrules/api/v1.1")
    updated = "2013-02-05T00:00:00Z"
    def get_resources(self):
        # Wire the controller into the os-security-group-default-rules
        # collection with standard CRUD verb mappings.
        resources = [
            extensions.ResourceExtension('os-security-group-default-rules',
                SecurityGroupDefaultRulesController(),
                collection_actions={'create': 'POST',
                                    'delete': 'DELETE',
                                    'index': 'GET'},
                member_actions={'show': 'GET'})]
        return resources
| apache-2.0 |
scollier/openshift-ansible-contrib | misc/gce-federation/library/gce.py | 8 | 27025 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
required: false
default: "debian-8"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
required: false
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
required: false
default: null
service_account_email:
version_added: "1.5.1"
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
default: null
required: false
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
required: false
default: null
name:
description:
- either a name of a single instance or when used with 'num_instances',
the base name of a cluster of nodes
required: false
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
required: false
version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
required: false
default: null
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
required: false
default: null
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
zone:
description:
- the GCE zone to use
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for
gateways)
required: false
default: "false"
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
required: false
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
preemptible:
version_added: "2.1"
description:
- if set to true, instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
required: false
default: "false"
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
required: false
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, seperated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
def get_instance_info(inst):
    """Retrieves instance information from an instance object and returns it
    as a dictionary.

    inst: a libcloud GCE node object (only its attributes are read).

    Returns a dict with keys: image, disks, machine_type, metadata, name,
    network, subnetwork, private_ip, public_ip, status, tags, zone.
    """
    metadata = {}
    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
        for md in inst.extra['metadata']['items']:
            metadata[md['key']] = md['value']
    # Network/subnetwork names are the last path component of their
    # resource URLs; either may be absent, in which case report None.
    # (Narrowed from bare "except:" clauses, which would also swallow
    # KeyboardInterrupt/SystemExit.)
    try:
        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
    except (KeyError, IndexError, TypeError, AttributeError):
        netname = None
    try:
        subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    except (KeyError, IndexError, TypeError, AttributeError):
        subnetname = None
    # Disk names ordered by attachment index; the boot disk comes first.
    if 'disks' in inst.extra:
        disk_names = [disk_info['source'].split('/')[-1]
                      for disk_info
                      in sorted(inst.extra['disks'],
                                key=lambda disk_info: disk_info['index'])]
    else:
        disk_names = []
    public_ip = inst.public_ips[0] if inst.public_ips else None
    return({
        'image': inst.image is not None and inst.image.split('/')[-1] or None,
        'disks': disk_names,
        'machine_type': inst.size,
        'metadata': metadata,
        'name': inst.name,
        'network': netname,
        'subnetwork': subnetname,
        'private_ip': inst.private_ips[0],
        'public_ip': public_ip,
        'status': ('status' in inst.extra) and inst.extra['status'] or None,
        'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
        'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
    })
def create_instances(module, gce, instance_names, number):
    """Creates new instances. Attributes other than instance_names are picked
    up from 'module'
    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    instance_names: python list of instance names to create
    Returns:
        A list of dictionaries with instance information
        about the instances that were launched.
    """
    image = module.params.get('image')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    disks = module.params.get('disks')
    state = module.params.get('state')
    tags = module.params.get('tags')
    zone = module.params.get('zone')
    ip_forward = module.params.get('ip_forward')
    external_ip = module.params.get('external_ip')
    disk_auto_delete = module.params.get('disk_auto_delete')
    preemptible = module.params.get('preemptible')
    disk_size = module.params.get('disk_size')
    service_account_permissions = module.params.get('service_account_permissions')
    service_account_email = module.params.get('service_account_email')
    # Resolve the requested external IP: 'none' -> no external address,
    # 'ephemeral' -> let GCE pick one, anything else -> a static address
    # given either literally (dotted quad) or by its GCE address name.
    if external_ip == "none":
        instance_external_ip = None
    elif external_ip != "ephemeral":
        instance_external_ip = external_ip
        try:
            # check if instance_external_ip is an ip or a name
            try:
                socket.inet_aton(instance_external_ip)
                instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
            except socket.error:
                instance_external_ip = gce.ex_get_address(instance_external_ip)
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
    else:
        instance_external_ip = external_ip
    new_instances = []
    changed = False
    # Resolve requested persistent disks into libcloud volume objects,
    # keeping a parallel list of their attachment modes.
    lc_disks = []
    disk_modes = []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            lc_disks.append(gce.ex_get_volume(disk['name']))
            disk_modes.append(disk['mode'])
        else:
            lc_disks.append(gce.ex_get_volume(disk))
            # boot disk is implicitly READ_WRITE
            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    lc_network = gce.ex_get_network(network)
    lc_machine_type = gce.ex_get_size(machine_type)
    lc_zone = gce.ex_get_zone(zone)
    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
    # Defer the expensive image lookup until we know a persistent boot
    # disk does not already provide it (see LazyDiskImage).
    lc_image = LazyDiskImage(module, gce, image, lc_disks)
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
    # These variables all have default values but check just in case
    if not lc_network or not lc_machine_type or not lc_zone:
        module.fail_json(msg='Missing required create instance variable',
                         changed=False)
    gce_args = dict(
        location=lc_zone,
        ex_network=network, ex_tags=tags, ex_metadata=metadata,
        ex_can_ip_forward=ip_forward,
        external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
        ex_service_accounts=ex_sa_perms
    )
    if preemptible is not None:
        gce_args['ex_preemptible'] = preemptible
    if subnetwork is not None:
        gce_args['ex_subnetwork'] = subnetwork
    if isinstance(instance_names, str) and not number:
        instance_names = [instance_names]
    # Bulk path: a single base name plus 'number' creates N nodes at once.
    if isinstance(instance_names, str) and number:
        instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
                                                          lc_image(), number, **gce_args)
        for resp in instance_responses:
            n = resp
            if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
                try:
                    n = gce.ex_get_node(n.name, lc_zone)
                except ResourceNotFoundError:
                    pass
            else:
                # Assure that at least one node has been created to set changed=True
                changed = True
            new_instances.append(n)
    else:
        # Per-name path: create each instance individually, reusing any
        # pre-existing node of the same name (idempotent behaviour).
        for instance in instance_names:
            pd = None
            if lc_disks:
                pd = lc_disks[0]
            elif persistent_boot_disk:
                try:
                    pd = gce.ex_get_volume("%s" % instance, lc_zone)
                except ResourceNotFoundError:
                    pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
            gce_args['ex_boot_disk'] = pd
            inst = None
            try:
                inst = gce.ex_get_node(instance, lc_zone)
            except ResourceNotFoundError:
                inst = gce.create_node(
                    instance, lc_machine_type, lc_image(), **gce_args
                )
                changed = True
            except GoogleBaseError as e:
                module.fail_json(msg='Unexpected error attempting to create ' +
                                 'instance %s, error: %s' % (instance, e.value))
            if inst:
                new_instances.append(inst)
    # Attach any remaining requested disks, verifying disks that are
    # already attached match the requested source and mode.
    for inst in new_instances:
        for i, lc_disk in enumerate(lc_disks):
            # Check whether the disk is already attached
            if (len(inst.extra['disks']) > i):
                attached_disk = inst.extra['disks'][i]
                if attached_disk['source'] != lc_disk.extra['selfLink']:
                    module.fail_json(
                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
                elif attached_disk['mode'] != disk_modes[i]:
                    module.fail_json(
                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
                            i, disk_modes[i], attached_disk['mode'])))
                else:
                    continue
            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
            # Work around libcloud bug: attached volumes don't get added
            # to the instance metadata. get_instance_info() only cares about
            # source and index.
            if len(inst.extra['disks']) != i+1:
                inst.extra['disks'].append(
                    {'source': lc_disk.extra['selfLink'], 'index': i})
    instance_names = []
    instance_json_data = []
    for inst in new_instances:
        d = get_instance_info(inst)
        instance_names.append(d['name'])
        instance_json_data.append(d)
    return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone_name, state):
    """Changes the state of a list of instances. For example,
    change from started to stopped, or started to absent.

    module: Ansible module object
    gce: authenticated GCE connection object
    instance_names: a list of instance names to act on, or a base name
        string when 'number' is given ('<base>-000' ... '<base>-NNN')
    number: optional count used with a base-name string
    zone_name: the zone where the instances reside prior to termination
    state: 'state' parameter passed into module as argument

    Returns a tuple (changed, state_instance_names) where the second item
    is the list of instance names that were acted on (or already absent).
    """
    changed = False
    nodes = []
    state_instance_names = []
    # Expand the requested names into the concrete node names to act on.
    if isinstance(instance_names, str) and number:
        node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
    elif isinstance(instance_names, str) and not number:
        node_names = [instance_names]
    else:
        node_names = instance_names
    for name in node_names:
        inst = None
        try:
            inst = gce.ex_get_node(name, zone_name)
        except ResourceNotFoundError:
            # Node already gone; still report it in the result list.
            state_instance_names.append(name)
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        else:
            nodes.append(inst)
            state_instance_names.append(name)
    if state in ['absent', 'deleted'] and number:
        # Bulk destroy; changed if any node was actually destroyed.
        # any() replaces the previous reduce() call, which is not a
        # builtin on Python 3 (reduce lives in functools there).
        changed = any(gce.ex_destroy_multiple_nodes(nodes) or [])
    else:
        for node in nodes:
            if state in ['absent', 'deleted']:
                gce.destroy_node(node)
                changed = True
            elif state == 'started' and \
                    node.state == libcloud.compute.types.NodeState.STOPPED:
                gce.ex_start_node(node)
                changed = True
            elif state in ['stopped', 'terminated'] and \
                    node.state == libcloud.compute.types.NodeState.RUNNING:
                gce.ex_stop_node(node)
                changed = True
    return (changed, state_instance_names)
def main():
    """Module entry point: parse arguments, then create, destroy, start or
    stop GCE instances according to the 'state' parameter, and exit with
    the JSON result expected by Ansible.
    """
    module = AnsibleModule(
        argument_spec = dict(
            image = dict(default='debian-8'),
            instance_names = dict(),
            machine_type = dict(default='n1-standard-1'),
            metadata = dict(),
            name = dict(aliases=['base_name']),
            num_instances = dict(type='int'),
            network = dict(default='default'),
            subnetwork = dict(),
            persistent_boot_disk = dict(type='bool', default=False),
            disks = dict(type='list'),
            state = dict(choices=['active', 'present', 'absent', 'deleted',
                                  'started', 'stopped', 'terminated'],
                         default='present'),
            tags = dict(type='list'),
            zone = dict(default='us-central1-a'),
            service_account_email = dict(),
            service_account_permissions = dict(type='list'),
            pem_file = dict(type='path'),
            credentials_file = dict(type='path'),
            project_id = dict(),
            ip_forward = dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            disk_auto_delete = dict(type='bool', default=True),
            disk_size = dict(type='int', default=10),
            preemptible = dict(type='bool', default=None),
        ),
        mutually_exclusive=[('instance_names', 'name')]
    )
    if not HAS_PYTHON26:
        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
    gce = gce_connect(module)
    image = module.params.get('image')
    instance_names = module.params.get('instance_names')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    name = module.params.get('name')
    number = module.params.get('num_instances')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    state = module.params.get('state')
    tags = module.params.get('tags')
    zone = module.params.get('zone')
    ip_forward = module.params.get('ip_forward')
    preemptible = module.params.get('preemptible')
    changed = False
    # Normalize 'instance_names' (list or comma-separated string) and
    # 'name' (base name) into a single working value, 'inames'.
    inames = None
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames = name
    if not inames:
        module.fail_json(msg='Must specify a "name" or "instance_names"',
                         changed=False)
    if not zone:
        module.fail_json(msg='Must specify a "zone"', changed=False)
    # Feature gates that depend on the installed libcloud version.
    if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
        module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
                         changed=False)
    if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
                         changed=False)
    json_output = {'zone': zone}
    if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
        json_output['state'] = state
        (changed, state_instance_names) = change_instance_state(
            module, gce, inames, number, zone, state)
        # based on what user specified, return the same variable, although
        # value could be different if an instance could not be destroyed
        if instance_names or name and number:
            json_output['instance_names'] = state_instance_names
        elif name:
            json_output['name'] = name
    elif state in ['active', 'present']:
        json_output['state'] = 'present'
        (changed, instance_data, instance_name_list) = create_instances(
            module, gce, inames, number)
        json_output['instance_data'] = instance_data
        if instance_names:
            json_output['instance_names'] = instance_name_list
        elif name:
            json_output['name'] = name
    json_output['changed'] = changed
    module.exit_json(**json_output)
class LazyDiskImage:
    """Lazily resolve a GCE disk image by name.

    gce.ex_get_image is a very expensive API call, so the lookup is
    deferred until the image is first needed and the result is cached
    for all subsequent calls.
    """

    def __init__(self, module, gce, name, has_pd):
        self.module = module
        self.gce = gce
        self.name = name
        self.has_pd = has_pd
        self.image = None
        self.was_called = False

    def __call__(self):
        # Only the first call performs any work; later calls reuse the
        # cached result.
        if self.was_called:
            return self.image
        self.was_called = True
        if self.has_pd:
            # A persistent boot disk supplies the image, so skip the lookup.
            return self.image
        self.image = self.gce.ex_get_image(self.name)
        if not self.image:
            self.module.fail_json(msg='image or disks missing for create instance', changed=False)
        return self.image
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| apache-2.0 |
StickmanVentures/lol-data-aggregator | app/models/participant.py | 1 | 2368 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
Base = declarative_base()
class Participant(Base):
    """One summoner's participation in a single League of Legends match,
    with per-player and per-team statistics flattened into columns.
    """
    __tablename__ = 'participations'
    # Identity and match context.
    id = Column(String(32), primary_key=True)
    team_id = Column(String(41))
    match_id = Column(Integer)
    summoner_id = Column(Integer)
    match_timestamp = Column(Integer)
    queue = Column(String)
    # Kill / death statistics.
    kills = Column(Integer)
    deaths = Column(Integer)
    assists = Column(Integer)
    double_kills = Column(Integer)
    triple_kills = Column(Integer)
    quadra_kills = Column(Integer)
    penta_kills = Column(Integer)
    # Vision control.
    sight_wards_bought_in_game = Column(Integer)
    vision_wards_bought_in_game = Column(Integer)
    wards_placed = Column(Integer)
    wards_killed = Column(Integer)
    # Damage taken, broken down by type.
    total_damage_taken = Column(Integer)
    true_damage_taken = Column(Integer)
    magic_damage_taken = Column(Integer)
    physical_damage_taken = Column(Integer)
    # Damage dealt (to anything), broken down by type.
    total_damage_dealt = Column(Integer)
    true_damage_dealt = Column(Integer)
    magic_damage_dealt = Column(Integer)
    physical_damage_dealt = Column(Integer)
    total_time_crowd_control_dealt = Column(Integer)
    # Damage dealt to enemy champions only.
    total_damage_dealt_to_champions = Column(Integer)
    true_damage_dealt_to_champions = Column(Integer)
    magic_damage_dealt_to_champions = Column(Integer)
    physical_damage_dealt_to_champions = Column(Integer)
    # Economy.
    gold_earned = Column(Integer)
    gold_spent = Column(Integer)
    minions_killed = Column(Integer)
    # Objectives taken by this player.
    tower_kills = Column(Integer)
    inhibitor_kills = Column(Integer)
    first_blood_kill = Column(Integer)
    first_blood_assist = Column(Integer)
    first_tower_kill = Column(Integer)
    first_tower_assist = Column(Integer)
    first_inhibitor_kill = Column(Integer)
    first_inhibitor_assist = Column(Integer)
    # Champion / position played and match outcome.
    champion_id = Column(Integer)
    lane = Column(String(6))
    role = Column(String(11))
    winner = Column(Integer)
    # Team-level objective statistics, duplicated onto each participant.
    team_first_blood = Column(Integer)
    team_first_tower = Column(Integer)
    team_first_inhibitor = Column(Integer)
    team_first_dragon = Column(Integer)
    team_first_baron = Column(Integer)
    team_tower_kills = Column(Integer)
    team_inhibitor_kills = Column(Integer)
    team_dragon_kills = Column(Integer)
    team_baron_kills = Column(Integer)
    def __repr__(self):
        return '<id=%s>' % self.id
| apache-2.0 |
aabbox/kbengine | kbe/res/scripts/common/Lib/test/test_importlib/source/test_case_sensitivity.py | 84 | 3475 | """Test case-sensitivity (PEP 235)."""
from .. import util
from . import util as source_util
importlib = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
import os
import sys
from test import support as test_support
import unittest
@util.case_insensitive_tests
class CaseSensitivityTest:
    """PEP 235 dictates that on case-preserving, case-insensitive file systems
    that imports are case-sensitive unless the PYTHONCASEOK environment
    variable is set."""
    # Deliberately mixed-case module name; the sanity assert below
    # guarantees the case-sensitivity comparison is meaningful.
    name = 'MoDuLe'
    assert name != name.lower()
    def finder(self, path):
        # Build a FileFinder that handles both source and bytecode files.
        return self.machinery.FileFinder(path,
                                         (self.machinery.SourceFileLoader,
                                          self.machinery.SOURCE_SUFFIXES),
                                         (self.machinery.SourcelessFileLoader,
                                          self.machinery.BYTECODE_SUFFIXES))
    def sensitivity_test(self):
        """Look for a module with matching and non-matching sensitivity."""
        sensitive_pkg = 'sensitive.{0}'.format(self.name)
        insensitive_pkg = 'insensitive.{0}'.format(self.name.lower())
        context = source_util.create_modules(insensitive_pkg, sensitive_pkg)
        with context as mapping:
            sensitive_path = os.path.join(mapping['.root'], 'sensitive')
            insensitive_path = os.path.join(mapping['.root'], 'insensitive')
            sensitive_finder = self.finder(sensitive_path)
            insensitive_finder = self.finder(insensitive_path)
            return self.find(sensitive_finder), self.find(insensitive_finder)
    def test_sensitive(self):
        # Without PYTHONCASEOK the mis-cased lookup must fail.
        with test_support.EnvironmentVarGuard() as env:
            env.unset('PYTHONCASEOK')
            # importlib reads the environment through its own frozen _os
            # module; skip if our change is not visible there.
            if b'PYTHONCASEOK' in self.importlib._bootstrap._os.environ:
                self.skipTest('os.environ changes not reflected in '
                              '_os.environ')
            sensitive, insensitive = self.sensitivity_test()
            self.assertIsNotNone(sensitive)
            self.assertIn(self.name, sensitive.get_filename(self.name))
            self.assertIsNone(insensitive)
    def test_insensitive(self):
        # With PYTHONCASEOK set, both lookups must succeed.
        with test_support.EnvironmentVarGuard() as env:
            env.set('PYTHONCASEOK', '1')
            if b'PYTHONCASEOK' not in self.importlib._bootstrap._os.environ:
                self.skipTest('os.environ changes not reflected in '
                              '_os.environ')
            sensitive, insensitive = self.sensitivity_test()
            self.assertIsNotNone(sensitive)
            self.assertIn(self.name, sensitive.get_filename(self.name))
            self.assertIsNotNone(insensitive)
            self.assertIn(self.name, insensitive.get_filename(self.name))
class CaseSensitivityTestPEP302(CaseSensitivityTest):
    def find(self, finder):
        # Legacy PEP 302 finder protocol: returns the loader directly.
        return finder.find_module(self.name)
# Instantiate concrete test classes against both the frozen (built-in)
# and the source (pure Python) implementations of importlib.
Frozen_CaseSensitivityTestPEP302, Source_CaseSensitivityTestPEP302 = util.test_both(
    CaseSensitivityTestPEP302, importlib=importlib, machinery=machinery)
class CaseSensitivityTestPEP451(CaseSensitivityTest):
    def find(self, finder):
        # PEP 451 finder protocol: extract the loader from the spec,
        # propagating None when nothing was found.
        spec = finder.find_spec(self.name)
        if spec is None:
            return None
        return spec.loader
# Same frozen/source instantiation for the PEP 451 variant.
Frozen_CaseSensitivityTestPEP451, Source_CaseSensitivityTestPEP451 = util.test_both(
    CaseSensitivityTestPEP451, importlib=importlib, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
maas/maas | src/maasserver/models/tests/test_nodemetadata.py | 1 | 1806 | # Copyright 2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test maasserver NodeMetadata model."""
from crochet import wait_for
from django.core.exceptions import ValidationError
from maasserver.models import NodeMetadata
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
wait_for_reactor = wait_for(30) # 30 seconds.
class TestNodeMetadata(MAASServerTestCase):
    """Tests for the NodeMetadata model (per-node key/value entries)."""
    def test_str(self):
        # A NodeMetadata object string representation references the parent
        # node hostname.
        node = factory.make_Machine(hostname="foobar")
        entry = factory.make_NodeMetadata(node=node, key="key")
        self.assertEqual("NodeMetadata (foobar/key)", str(entry))
    def test_unique_on_node_and_key(self):
        # We can only ever have one NodeMetadata object for a particular node
        # and key.
        entry = factory.make_NodeMetadata()
        self.assertRaises(
            ValidationError,
            factory.make_NodeMetadata,
            node=entry.node,
            key=entry.key,
        )
    def test_multiple_keys_on_node(self):
        # Several NodeMetadata entries may coexist on the same node as
        # long as their keys differ.
        entry1 = factory.make_NodeMetadata(key="key1", value="value")
        entry2 = factory.make_NodeMetadata(
            node=entry1.node, key="key2", value="value"
        )
        self.assertNotEqual(entry1, entry2)
    def test_get(self):
        # get() falls back to the supplied default when no entry exists
        # for the node/key pair.
        node = factory.make_Node()
        key = factory.make_name("key")
        default = factory.make_name("default")
        self.assertEqual(
            default,
            NodeMetadata.objects.get(node=node, key=key, default=default),
        )
| agpl-3.0 |
tanyunshi/python-docx | docx/oxml/text/run.py | 13 | 4888 | # encoding: utf-8
"""
Custom element classes related to text runs (CT_R).
"""
from ..ns import qn
from ..simpletypes import ST_BrClear, ST_BrType
from ..xmlchemy import (
BaseOxmlElement, OptionalAttribute, ZeroOrMore, ZeroOrOne
)
class CT_Br(BaseOxmlElement):
    """
    ``<w:br>`` element, indicating a line, page, or column break in a run.
    """
    # Both attributes are optional in the WordprocessingML schema; each
    # reads as None when absent.
    type = OptionalAttribute('w:type', ST_BrType)
    clear = OptionalAttribute('w:clear', ST_BrClear)
class CT_R(BaseOxmlElement):
    """
    ``<w:r>`` element, containing the properties and text for a run.
    """
    # Declarative child-element descriptors; each generates helpers such
    # as ``_add_t()`` and ``get_or_add_rPr()`` used below.
    rPr = ZeroOrOne('w:rPr')
    t = ZeroOrMore('w:t')
    br = ZeroOrMore('w:br')
    cr = ZeroOrMore('w:cr')
    tab = ZeroOrMore('w:tab')
    drawing = ZeroOrMore('w:drawing')

    def _insert_rPr(self, rPr):
        # <w:rPr>, when present, must be the first child of <w:r>.
        self.insert(0, rPr)
        return rPr

    def add_t(self, text):
        """
        Return a newly added ``<w:t>`` element containing *text*.
        """
        t = self._add_t(text=text)
        if len(text.strip()) < len(text):
            # *text* has leading and/or trailing whitespace, which an XML
            # processor would otherwise discard; preserve it explicitly.
            t.set(qn('xml:space'), 'preserve')
        return t

    def add_drawing(self, inline_or_anchor):
        """
        Return a newly appended ``CT_Drawing`` (``<w:drawing>``) child
        element having *inline_or_anchor* as its child.
        """
        drawing = self._add_drawing()
        drawing.append(inline_or_anchor)
        return drawing

    def clear_content(self):
        """
        Remove all child elements except the ``<w:rPr>`` element if present.
        """
        # rPr is always the first child when present (see _insert_rPr), so
        # skipping index 0 is sufficient to retain it.
        content_child_elms = self[1:] if self.rPr is not None else self[:]
        for child in content_child_elms:
            self.remove(child)

    @property
    def style(self):
        """
        String contained in w:val attribute of <w:rStyle> grandchild, or
        |None| if that element is not present.
        """
        rPr = self.rPr
        if rPr is None:
            return None
        return rPr.style

    @style.setter
    def style(self, style):
        """
        Set the character style of this <w:r> element to *style*. If *style*
        is None, remove the style element.
        """
        rPr = self.get_or_add_rPr()
        rPr.style = style

    @property
    def text(self):
        """
        A string representing the textual content of this run, with content
        child elements like ``<w:tab/>`` translated to their Python
        equivalent.
        """
        text = ''
        for child in self:
            if child.tag == qn('w:t'):
                t_text = child.text
                # An empty <w:t/> element has a .text of None, not ''.
                text += t_text if t_text is not None else ''
            elif child.tag == qn('w:tab'):
                text += '\t'
            elif child.tag in (qn('w:br'), qn('w:cr')):
                text += '\n'
        return text

    @text.setter
    def text(self, text):
        # Replace all existing run content (run properties are kept) with
        # elements equivalent to *text*.
        self.clear_content()
        _RunContentAppender.append_to_run_from_text(self, text)
class CT_Text(BaseOxmlElement):
    """
    ``<w:t>`` element, containing a sequence of characters within a run.

    No custom behavior is needed; the class exists so the element has a
    distinct custom-element type.
    """
class _RunContentAppender(object):
"""
Service object that knows how to translate a Python string into run
content elements appended to a specified ``<w:r>`` element. Contiguous
sequences of regular characters are appended in a single ``<w:t>``
element. Each tab character ('\t') causes a ``<w:tab/>`` element to be
appended. Likewise a newline or carriage return character ('\n', '\r')
causes a ``<w:cr>`` element to be appended.
"""
def __init__(self, r):
self._r = r
self._bfr = []
@classmethod
def append_to_run_from_text(cls, r, text):
"""
Create a "one-shot" ``_RunContentAppender`` instance and use it to
append the run content elements corresponding to *text* to the
``<w:r>`` element *r*.
"""
appender = cls(r)
appender.add_text(text)
def add_text(self, text):
"""
Append the run content elements corresponding to *text* to the
``<w:r>`` element of this instance.
"""
for char in text:
self.add_char(char)
self.flush()
def add_char(self, char):
"""
Process the next character of input through the translation finite
state maching (FSM). There are two possible states, buffer pending
and not pending, but those are hidden behind the ``.flush()`` method
which must be called at the end of text to ensure any pending
``<w:t>`` element is written.
"""
if char == '\t':
self.flush()
self._r.add_tab()
elif char in '\r\n':
self.flush()
self._r.add_br()
else:
self._bfr.append(char)
def flush(self):
text = ''.join(self._bfr)
if text:
self._r.add_t(text)
del self._bfr[:]
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/jinja2/optimizer.py | 222 | 1722 | # -*- coding: utf-8 -*-
"""
jinja2.optimizer
~~~~~~~~~~~~~~~~
The jinja optimizer is currently trying to constant fold a few expressions
and modify the AST in place so that it should be easier to evaluate it.
Because the AST does not contain all the scoping information and the
compiler has to find that out, we cannot do all the optimizations we
want. For example loop unrolling doesn't work because unrolled loops would
have a different scoping.
The solution would be a second syntax tree that has the scoping rules stored.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.visitor import NodeTransformer
def optimize(node, environment):
    """Return *node* with constant sub-expressions folded.

    The context hint can be used to perform a static optimization
    based on the context given.
    """
    return Optimizer(environment).visit(node)
class Optimizer(NodeTransformer):
    """Node transformer that constant-folds expression nodes in place."""

    def __init__(self, environment):
        self.environment = environment

    def fold(self, node, eval_ctx=None):
        """Do constant folding."""
        # Fold children first so e.g. `1 + 2 * 3` collapses bottom-up.
        node = self.generic_visit(node)
        try:
            return nodes.Const.from_untrusted(node.as_const(eval_ctx),
                                              lineno=node.lineno,
                                              environment=self.environment)
        except nodes.Impossible:
            # Not evaluable at compile time; leave the node untouched.
            return node

    # Register `fold` as the visitor for every expression node type that
    # can potentially be reduced to a constant, then drop the name so it
    # is not exposed as a public method of its own.
    visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
        visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
        visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
        visit_Filter = visit_Test = visit_CondExpr = fold
    del fold
| gpl-3.0 |
blooparksystems/odoo | addons/website_quote/models/payment.py | 13 | 1162 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
class PaymentTransaction(orm.Model):
    """Extends payment.transaction to auto-confirm the linked sale order."""
    _inherit = 'payment.transaction'

    _columns = {
        # link with the sale order
        'sale_order_id': fields.many2one('sale.order', 'Sale Order'),
    }

    def form_feedback(self, cr, uid, data, acquirer_name, context=None):
        """ Override to confirm the sale order, if defined, and if the transaction
        is done. """
        tx = None
        res = super(PaymentTransaction, self).form_feedback(cr, uid, data, acquirer_name, context=context)

        # fetch the tx, check its state, confirm the potential SO
        # (each acquirer module may expose its own tx-lookup hook)
        tx_find_method_name = '_%s_form_get_tx_from_data' % acquirer_name
        if hasattr(self, tx_find_method_name):
            tx = getattr(self, tx_find_method_name)(cr, uid, data, context=context)

        # Confirm only when payment completed, the acquirer is configured
        # for confirmation-at-payment, and the SO is still unconfirmed.
        if tx and tx.state == 'done' and tx.acquirer_id.auto_confirm == 'at_pay_confirm' and tx.sale_order_id and tx.sale_order_id.state in ['draft', 'sent']:
            # SUPERUSER_ID: the paying visitor has no write rights on SOs.
            self.pool['sale.order'].action_confirm(cr, SUPERUSER_ID, [tx.sale_order_id.id], context=context)
        return res
| gpl-3.0 |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/openid/yadis/parsehtml.py | 167 | 5850 | __all__ = ['findHTMLMeta', 'MetaNotFound']
from HTMLParser import HTMLParser, HTMLParseError
import htmlentitydefs
import re
from openid.yadis.constants import YADIS_HEADER_NAME
# Size of the chunks to search at a time (also the amount that gets
# read at a time from the response stream in findHTMLMeta below).
CHUNK_SIZE = 1024 * 16  # 16 KB
class ParseDone(Exception):
    """Exception to hold the URI that was located when the parse is
    finished. If the parse finishes without finding the URI, set it to
    None.

    Raised (not returned) so parsing can stop as soon as a verdict is
    reached; the URI travels as the exception's first argument.
    """
class MetaNotFound(Exception):
    """Exception to hold the content of the page if we did not find
    the appropriate <meta> tag.

    The full page text is the exception's first argument so callers can
    fall back to inspecting it directly.
    """
re_flags = re.IGNORECASE | re.UNICODE | re.VERBOSE
ent_pat = r'''
&
(?: \#x (?P<hex> [a-f0-9]+ )
| \# (?P<dec> \d+ )
| (?P<word> \w+ )
)
;'''
ent_re = re.compile(ent_pat, re_flags)
def substituteMO(mo):
    """Return the replacement text for one entity-reference match.

    *mo* is a match of ``ent_re``; exactly one of its groups ('hex',
    'dec', 'word') is set. Unknown named entities are left as-is.
    """
    kind = mo.lastgroup
    if kind == 'hex':
        codepoint = int(mo.group('hex'), 16)
    elif kind == 'dec':
        codepoint = int(mo.group('dec'))
    else:
        assert kind == 'word'
        # None when the name is not a recognized HTML entity.
        codepoint = htmlentitydefs.name2codepoint.get(mo.group('word'))

    if codepoint is None:
        # Unrecognized entity: keep the original '&...;' text verbatim.
        return mo.group()
    return unichr(codepoint)
def substituteEntities(s):
    """Return *s* with all HTML character references decoded."""
    return ent_re.sub(substituteMO, s)
class YadisHTMLParser(HTMLParser):
    """Parser that finds a meta http-equiv tag in the head of a html
    document.

    When feeding in data, if the tag is matched or it will never be
    found, the parser will raise ParseDone with the uri as the first
    attribute.

    Parsing state diagram
    =====================

    Any unlisted input does not affect the state::

                1, 2, 5                       8
               +--------------------------+  +-+
               |                          |  | |
             4 |      3       1, 2, 5, 7  v  | v
        TOP -> HTML -> HEAD ----------> TERMINATED
         |       |       ^  |               ^  ^
         |       | 3     |  |               |  |
         |       +-------+  +-> FOUND ------+  |
         |                  6         8        |
         | 1, 2                                |
         +-------------------------------------+

      1. any of </body>, </html>, </head> -> TERMINATE
      2. <body> -> TERMINATE
      3. <head> -> HEAD
      4. <html> -> HTML
      5. <html> -> TERMINATE
      6. <meta http-equiv='X-XRDS-Location'> -> FOUND
      7. <head> -> TERMINATE
      8. Any input -> TERMINATE
    """
    # Parser states (see diagram above).
    TOP = 0
    HTML = 1
    HEAD = 2
    FOUND = 3
    TERMINATED = 4

    def __init__(self):
        HTMLParser.__init__(self)
        self.phase = self.TOP

    def _terminate(self):
        # Signal "no URI will be found" by raising ParseDone(None).
        self.phase = self.TERMINATED
        raise ParseDone(None)

    def handle_endtag(self, tag):
        # If we ever see an end of head, body, or html, bail out right away.
        # [1]
        if tag in ['head', 'body', 'html']:
            self._terminate()

    def handle_starttag(self, tag, attrs):
        # if we ever see a start body tag, bail out right away, since
        # we want to prevent the meta tag from appearing in the body
        # [2]
        if tag=='body':
            self._terminate()

        if self.phase == self.TOP:
            # At the top level, allow a html tag or a head tag to move
            # to the head or html phase
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [4]
                self.phase = self.HTML

        elif self.phase == self.HTML:
            # if we are in the html tag, allow a head tag to move to
            # the HEAD phase. If we get another html tag, then bail
            # out
            if tag == 'head':
                # [3]
                self.phase = self.HEAD
            elif tag == 'html':
                # [5]
                self._terminate()

        elif self.phase == self.HEAD:
            # If we are in the head phase, look for the appropriate
            # meta tag. If we get a head or body tag, bail out.
            if tag == 'meta':
                attrs_d = dict(attrs)
                http_equiv = attrs_d.get('http-equiv', '').lower()
                if http_equiv == YADIS_HEADER_NAME.lower():
                    raw_attr = attrs_d.get('content')
                    # Decode any entity references in the attribute value.
                    yadis_loc = substituteEntities(raw_attr)
                    # [6]
                    self.phase = self.FOUND
                    raise ParseDone(yadis_loc)

            elif tag in ['head', 'html']:
                # [5], [7]
                self._terminate()

    def feed(self, chars):
        # [8] -- once a verdict is reached, any further input terminates.
        if self.phase in [self.TERMINATED, self.FOUND]:
            self._terminate()

        return HTMLParser.feed(self, chars)
def findHTMLMeta(stream):
    """Look for a meta http-equiv tag with the YADIS header name.

    @param stream: Source of the html text
    @type stream: Object that implements a read() method that works
        like file.read

    @return: The URI from which to fetch the XRDS document
    @rtype: str

    @raises MetaNotFound: raised with the content that was
        searched as the first parameter.
    """
    parser = YadisHTMLParser()
    chunks = []

    # Feed the document to the parser incrementally; the parser raises
    # ParseDone as soon as it can decide, so we rarely read the whole page.
    while 1:
        chunk = stream.read(CHUNK_SIZE)
        if not chunk:
            # End of file
            break

        chunks.append(chunk)
        try:
            parser.feed(chunk)
        except HTMLParseError, why:
            # HTML parse error, so bail
            chunks.append(stream.read())
            break
        except ParseDone, why:
            uri = why[0]
            if uri is None:
                # Parse finished, but we may need the rest of the file
                # (callers inspect the full content via MetaNotFound).
                chunks.append(stream.read())
                break
            else:
                return uri

    content = ''.join(chunks)
    raise MetaNotFound(content)
| bsd-3-clause |
repotvsupertuga/repo | script.module.urlresolver/lib/urlresolver/plugins/tunepk.py | 4 | 2813 | '''
tunepk urlresolver plugin
Copyright (C) 2013 icharania
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class TunePkResolver(UrlResolver):
    """URL resolver for tune.pk embedded videos."""
    name = "tune.pk"
    domains = ["tune.pk"]
    # Captures (host, media_id) from player/video/play style URLs.
    pattern = '(?://|\.)(tune\.pk)/(?:player|video|play)/(?:[\w\.\?]+=)?(\d+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Return a playable stream URL for *media_id*, honoring the
        user's quality setting.

        @raises ResolverError: when the video is missing or no stream
            can be extracted.
        """
        web_url = self.get_url(host, media_id)
        # NOTE(review): repr() turns the page into a single escaped line;
        # the replace('\\', '') below then strips the escape backslashes.
        link = repr(self.net.http_GET(web_url).content)

        if link.find('404 Not Found') >= 0:
            raise ResolverError('The requested video was not found.')

        videoUrl = []
        # borrowed from AJ's turtle-x
        html = link.replace('\n\r', '').replace('\r', '').replace('\n', '').replace('\\', '')
        # Pull each {"file": ...} entry out of the player's sources array.
        sources = re.compile("{(.+?)}").findall(re.compile("sources (.+?)]").findall(html)[0])
        for source in sources:
            video_link = str(re.compile('"file":"(.*?)"').findall(source)[0])
            videoUrl.append(video_link)

        vUrl = ''
        vUrlsCount = len(videoUrl)
        if vUrlsCount > 0:
            # Sources are assumed ordered best-first -- TODO confirm.
            q = self.get_setting('quality')
            if q == '0':
                # Highest Quality
                vUrl = videoUrl[0]
            elif q == '1':
                # Medium Quality
                vUrl = videoUrl[(int)(vUrlsCount / 2)]
            elif q == '2':
                # Lowest Quality
                vUrl = videoUrl[vUrlsCount - 1]

            return vUrl
        else:
            raise ResolverError('No playable video found.')

    def get_url(self, host, media_id):
        # Canonical embed URL for the given media id.
        return 'http://embed.tune.pk/play/%s' % media_id

    def get_host_and_id(self, url):
        # Returns (host, media_id) on a match, False otherwise.
        r = re.search(self.pattern, url)
        if r:
            return r.groups()
        else:
            return False

    def valid_url(self, url, host):
        return re.search(self.pattern, url) or self.name in host

    @classmethod
    def get_settings_xml(cls):
        # Extend the base settings with a per-resolver quality selector.
        xml = super(cls, cls).get_settings_xml()
        xml.append('<setting label="Video Quality" id="%s_quality" type="enum" values="High|Medium|Low" default="0" />' % (cls.__name__))
        return xml
| gpl-2.0 |
AdrianGaudebert/elmo | vendor-local/lib/python/south/management/commands/schemamigration.py | 10 | 10371 | """
Startmigration command, version 2.
"""
from __future__ import print_function
import sys
import os
import re
import string
import random
import inspect
from optparse import make_option
try:
set
except NameError:
from sets import Set as set
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.conf import settings
from south.migration import Migrations, migrate_app
from south.models import MigrationHistory
from south.exceptions import NoMigrations
from south.creator import changes, actions, freezer
from south.management.commands.datamigration import Command as DataCommand
class Command(DataCommand):
    """./manage.py schemamigration -- create a schema migration for an app."""
    option_list = DataCommand.option_list + (
        make_option('--add-model', action='append', dest='added_model_list', type='string',
            help='Generate a Create Table migration for the specified model.  Add multiple models to this migration with subsequent --add-model parameters.'),
        make_option('--add-field', action='append', dest='added_field_list', type='string',
            help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
        make_option('--add-index', action='append', dest='added_index_list', type='string',
            help='Generate an Add Index migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
        make_option('--initial', action='store_true', dest='initial', default=False,
            help='Generate the initial schema for the app.'),
        make_option('--auto', action='store_true', dest='auto', default=False,
            help='Attempt to automatically detect differences from the last migration.'),
        make_option('--empty', action='store_true', dest='empty', default=False,
            help='Make a blank migration.'),
        make_option('--update', action='store_true', dest='update', default=False,
            help='Update the most recent migration instead of creating a new one. Rollback this migration if it is already applied.'),
    )
    help = "Creates a new template schema migration for the given app"
    usage_str = "Usage: ./manage.py schemamigration appname migrationname [--empty] [--initial] [--auto] [--add-model ModelName] [--add-field ModelName.field_name] [--stdout]"

    def handle(self, app=None, name="", added_model_list=None, added_field_list=None, freeze_list=None, initial=False, auto=False, stdout=False, added_index_list=None, verbosity=1, empty=False, update=False, **options):
        """Determine the changes to record, render the migration file and
        write it (or print it when name is '-')."""

        # Any supposed lists that are None become empty lists
        added_model_list = added_model_list or []
        added_field_list = added_field_list or []
        added_index_list = added_index_list or []
        freeze_list = freeze_list or []

        # --stdout means name = -
        if stdout:
            name = "-"

        # Only allow valid names
        if re.search('[^_\w]', name) and name != "-":
            self.error("Migration names should contain only alphanumeric characters and underscores.")

        # Make sure options are compatable
        if initial and (added_model_list or added_field_list or auto):
            self.error("You cannot use --initial and other options together\n" + self.usage_str)

        if auto and (added_model_list or added_field_list or initial):
            self.error("You cannot use --auto and other options together\n" + self.usage_str)

        if not app:
            self.error("You must provide an app to create a migration for.\n" + self.usage_str)

        # See if the app exists
        app = app.split(".")[-1]
        try:
            app_module = models.get_app(app)
        except ImproperlyConfigured:
            print("There is no enabled application matching '%s'." % app)
            return

        # Get the Migrations for this app (creating the migrations dir if needed)
        migrations = Migrations(app, force_creation=True, verbose_creation=int(verbosity) > 0)

        # What actions do we need to do?
        if auto:
            # Get the old migration
            try:
                # With --update we diff against the migration *before* the
                # one being replaced.
                last_migration = migrations[-2 if update else -1]
            except IndexError:
                self.error("You cannot use --auto on an app with no migrations. Try --initial.")
            # Make sure it has stored models
            if migrations.app_label() not in getattr(last_migration.migration_class(), "complete_apps", []):
                self.error("You cannot use automatic detection, since the previous migration does not have this whole app frozen.\nEither make migrations using '--freeze %s' or set 'SOUTH_AUTO_FREEZE_APP = True' in your settings.py." % migrations.app_label())
            # Alright, construct two model dicts to run the differ on.
            old_defs = dict(
                (k, v) for k, v in last_migration.migration_class().models.items()
                if k.split(".")[0] == migrations.app_label()
            )
            new_defs = dict(
                (k, v) for k, v in freezer.freeze_apps([migrations.app_label()]).items()
                if k.split(".")[0] == migrations.app_label()
            )
            change_source = changes.AutoChanges(
                migrations = migrations,
                old_defs = old_defs,
                old_orm = last_migration.orm(),
                new_defs = new_defs,
            )

        elif initial:
            # Do an initial migration
            change_source = changes.InitialChanges(migrations)

        else:
            # Read the commands manually off of the arguments
            if (added_model_list or added_field_list or added_index_list):
                change_source = changes.ManualChanges(
                    migrations,
                    added_model_list,
                    added_field_list,
                    added_index_list,
                )
            elif empty:
                change_source = None
            else:
                print("You have not passed any of --initial, --auto, --empty, --add-model, --add-field or --add-index.", file=sys.stderr)
                sys.exit(1)

        # Validate this so we can access the last migration without worrying
        if update and not migrations:
            self.error("You cannot use --update on an app with no migrations.")

        # if not name, there's an error
        if not name:
            if change_source:
                name = change_source.suggest_name()
            if update:
                # Keep the replaced migration's name, minus its number prefix.
                name = re.sub(r'^\d{4}_', '', migrations[-1].name())
            if not name:
                self.error("You must provide a name for this migration\n" + self.usage_str)

        # Get the actions, and then insert them into the actions lists
        forwards_actions = []
        backwards_actions = []
        if change_source:
            for action_name, params in change_source.get_changes():
                # Run the correct Action class
                try:
                    action_class = getattr(actions, action_name)
                except AttributeError:
                    raise ValueError("Invalid action name from source: %s" % action_name)
                else:
                    action = action_class(**params)
                    action.add_forwards(forwards_actions)
                    action.add_backwards(backwards_actions)
                    print(action.console_line(), file=sys.stderr)

        # Nowt happen? That's not good for --auto.
        if auto and not forwards_actions:
            self.error("Nothing seems to have changed.")

        # Work out which apps to freeze
        apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list)

        # So, what's in this file, then?
        file_contents = MIGRATION_TEMPLATE % {
            "forwards": "\n".join(forwards_actions or ["        pass"]),
            "backwards": "\n".join(backwards_actions or ["        pass"]),
            "frozen_models":  freezer.freeze_apps_to_string(apps_to_freeze),
            "complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or ""
        }

        # Deal with update mode as late as possible, avoid a rollback as long
        # as something else can go wrong.
        if update:
            last_migration = migrations[-1]
            if MigrationHistory.objects.filter(applied__isnull=False, app_name=app, migration=last_migration.name()):
                print("Migration to be updated, %s, is already applied, rolling it back now..." % last_migration.name(), file=sys.stderr)
                migrate_app(migrations, 'current-1', verbosity=verbosity)
            for ext in ('py', 'pyc'):
                old_filename = "%s.%s" % (os.path.join(migrations.migrations_dir(), last_migration.filename), ext)
                if os.path.isfile(old_filename):
                    os.unlink(old_filename)
            migrations.remove(last_migration)

        # See what filename is next in line. We assume they use numbers.
        new_filename = migrations.next_filename(name)

        # - is a special name which means 'print to stdout'
        if name == "-":
            print(file_contents)
        # Write the migration file if the name isn't -
        else:
            fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w")
            fp.write(file_contents)
            fp.close()
            verb = 'Updated' if update else 'Created'
            if empty:
                print("%s %s. You must now edit this migration and add the code for each direction." % (verb, new_filename), file=sys.stderr)
            else:
                print("%s %s. You can now apply this migration with: ./manage.py migrate %s" % (verb, new_filename, app), file=sys.stderr)
MIGRATION_TEMPLATE = """# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
%(forwards)s
def backwards(self, orm):
%(backwards)s
models = %(frozen_models)s
%(complete_apps)s"""
| mpl-2.0 |
stackforge/poppy | tests/unit/storage/cassandra/test_services.py | 2 | 27911 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
try:
import ordereddict as collections
except ImportError: # pragma: no cover
import collections # pragma: no cover
import ddt
import mock
from oslo_config import cfg
import testtools
from poppy.model.helpers import provider_details
from poppy.storage.cassandra import driver
from poppy.storage.cassandra import services
from poppy.transport.pecan.models.request import service as req_service
from tests.unit import base
@ddt.ddt
class CassandraStorageServiceTests(base.TestCase):
def setUp(self):
super(CassandraStorageServiceTests, self).setUp()
# mock arguments to use
self.project_id = '123456'
self.service_id = uuid.uuid4()
self.service_name = 'mocksite'
# create mocked config and driver
conf = cfg.ConfigOpts()
conf.register_opt(
cfg.StrOpt(
'datacenter',
default='',
help='datacenter where the C* cluster hosted'))
conf.register_opts(driver.CASSANDRA_OPTIONS,
group=driver.CASSANDRA_GROUP)
cassandra_driver = driver.CassandraStorageDriver(conf)
migrations_patcher = mock.patch(
'cdeploy.migrator.Migrator'
)
migrations_patcher.start()
self.addCleanup(migrations_patcher.stop)
cluster_patcher = mock.patch('cassandra.cluster.Cluster')
self.mock_cluster = cluster_patcher.start()
self.mock_session = self.mock_cluster().connect()
self.addCleanup(cluster_patcher.stop)
# stubbed cassandra driver
self.sc = services.ServicesController(cassandra_driver)
@ddt.file_data('data_get_service.json')
def test_get_service(self, value):
# mock the response from cassandra
value[0]['service_id'] = self.service_id
self.mock_session.execute.return_value = value
actual_response = self.sc.get_service(self.project_id, self.service_id)
# TODO(amitgandhinz): assert the response
# matches the expectation (using jsonschema)
self.assertEqual(str(actual_response.service_id), str(self.service_id))
@ddt.file_data('data_get_service.json')
def test_update_state(self, value):
details = value[0]['provider_details']
new_details = {}
for provider, detail in list(details.items()):
detail = json.loads(detail)
detail['status'] = 'deployed'
detail['access_urls'] = [
{
'provider_url': "{0}.com".format(provider.lower()),
'domain': detail['access_urls'][0]
}
]
new_details[provider] = json.dumps(detail)
value[0]['provider_details'] = new_details
# mock the response from cassandra
value[0]['service_id'] = self.service_id
self.mock_session.execute.return_value = [value[0]]
expected_obj = self.sc.get_service(self.project_id, self.service_id)
actual_obj = self.sc.update_state(self.project_id, self.service_id,
'deployed')
self.assertEqual(expected_obj.service_id, actual_obj.service_id)
def test_get_service_with_exception(self):
# mock the response from cassandra
self.mock_session.execute.return_value = []
self.assertRaises(
ValueError,
self.sc.get_service,
self.project_id,
self.service_id
)
@ddt.file_data('../data/data_create_service.json')
@mock.patch.object(services.ServicesController,
'domain_exists_elsewhere',
return_value=False)
def test_create_service(self, value, mock_check):
service_obj = req_service.load_from_json(value)
responses = self.sc.create_service(self.project_id, service_obj)
# Expect the response to be None as there are no providers passed
# into the driver to respond to this call
self.assertEqual(responses, None)
# TODO(amitgandhinz): need to validate the create to cassandra worked.
@ddt.file_data('../data/data_create_service.json')
@mock.patch.object(services.ServicesController,
'domain_exists_elsewhere',
return_value=True)
def test_create_service_exist(self, value, mock_check):
service_obj = req_service.load_from_json(value)
self.sc.get = mock.Mock(return_value=service_obj)
self.assertRaises(
ValueError,
self.sc.create_service,
self.project_id, service_obj
)
@ddt.file_data('data_list_services.json')
def test_list_services(self, value):
# mock the response from cassandra
value[0]['project_id'] = self.project_id
self.mock_session.prepare.return_value = mock.Mock()
self.mock_session.execute.return_value = value
actual_response = self.sc.get_services(self.project_id, None, None)
# TODO(amitgandhinz): assert the response
# matches the expectation (using jsonschema)
self.assertEqual(actual_response[0].name, "mocksite")
self.assertEqual(actual_response[0].project_id, self.project_id)
@ddt.file_data('data_get_service.json')
def test_delete_service(self, value):
details = value[0]['provider_details']
new_details = {}
for provider, detail in list(details.items()):
detail = json.loads(detail)
detail['status'] = 'deployed'
detail['access_urls'] = [
{
'provider_url': "{0}.com".format(provider.lower()),
'domain': detail['access_urls'][0]
}
]
new_details[provider] = json.dumps(detail)
value[0]['provider_details'] = new_details
# mock the response from cassandra
value[0]['service_id'] = self.service_id
# self.mock_session.execute.return_value = value
def mock_execute_side_effect(*args):
if args[0].query_string == services.CQL_GET_SERVICE:
return [value[0]]
else:
return None
self.mock_session.execute.side_effect = mock_execute_side_effect
self.sc.delete_service(
self.project_id,
self.service_id
)
# TODO(isaacm): Add assertions on queries called
def test_delete_service_no_result(self):
# mock the response from cassandra
self.mock_session.execute.return_value = iter([{}])
actual_response = self.sc.delete_service(
self.project_id,
self.service_id
)
# Expect the response to be None as there are no providers passed
# into the driver to respond to this call
self.assertEqual(actual_response, None)
@ddt.file_data('../data/data_update_service.json')
@mock.patch.object(services.ServicesController,
'domain_exists_elsewhere',
return_value=False)
@mock.patch.object(services.ServicesController,
'set_service_provider_details')
def test_update_service(self, service_json,
mock_set_service_provider_details,
mock_check):
with mock.patch.object(
services.ServicesController,
'get_provider_details') as mock_provider_det:
mock_provider_det.return_value = {
"MaxCDN": "{\"id\": 11942, \"access_urls\": "
"[{\"provider_url\": \"maxcdn.provider.com\", "
"\"domain\": \"xk.cd\"}], "
"\"domains_certificate_status\":"
"{\"mypullzone.com\": "
"\"failed\"} }",
}
self.mock_session.execute.return_value = iter([{}])
service_obj = req_service.load_from_json(service_json)
actual_response = self.sc.update_service(
self.project_id,
self.service_id,
service_obj
)
# Expect the response to be None as there are no
# providers passed into the driver to respond to this call
self.assertEqual(actual_response, None)
@ddt.file_data('data_provider_details.json')
def test_get_provider_details(self, provider_details_json):
# mock the response from cassandra
self.mock_session.execute.return_value = [
{'provider_details': provider_details_json}
]
actual_response = self.sc.get_provider_details(
self.project_id,
self.service_id
)
self.assertTrue("MaxCDN" in actual_response)
self.assertTrue("Mock" in actual_response)
self.assertTrue("CloudFront" in actual_response)
self.assertTrue("Fastly" in actual_response)
@ddt.file_data('data_provider_details.json')
def test_get_provider_details_value_error(self, provider_details_json):
# mock the response from cassandra
self.mock_session.execute.return_value = []
with testtools.ExpectedException(ValueError):
self.sc.get_provider_details(
self.project_id,
self.service_id
)
    @ddt.file_data('data_provider_details.json')
    def test_update_provider_details(self, provider_details_json):
        """update_provider_details must serialize each ProviderDetail and
        execute the provider-details update and service-status CQL
        statements with exactly the predicted arguments.
        """
        # Build ProviderDetail objects from the JSON fixture data.
        provider_details_dict = {}
        for k, v in provider_details_json.items():
            provider_detail_dict = json.loads(v)
            provider_details_dict[k] = provider_details.ProviderDetail(
                provider_service_id=(
                    provider_detail_dict["id"]),
                access_urls=provider_detail_dict["access_urls"],
                domains_certificate_status=provider_detail_dict.get(
                    "domains_certificate_status", {}))
        # Mock the response from cassandra.
        self.mock_session.execute.return_value = None
        # Re-serialize the fixture the same way the controller is expected
        # to, producing the arguments the CQL statements should receive
        # (this is for update_provider_details unittest code coverage).
        arg_provider_details_dict = {}
        status = None
        for provider_name in provider_details_dict:
            the_provider_detail_dict = collections.OrderedDict()
            the_provider_detail_dict["id"] = (
                provider_details_dict[provider_name].provider_service_id)
            the_provider_detail_dict["access_urls"] = (
                provider_details_dict[provider_name].access_urls)
            the_provider_detail_dict["status"] = (
                provider_details_dict[provider_name].status)
            status = the_provider_detail_dict["status"]
            the_provider_detail_dict["name"] = (
                provider_details_dict[provider_name].name)
            the_provider_detail_dict["domains_certificate_status"] = (
                provider_details_dict[provider_name].
                domains_certificate_status.to_dict())
            the_provider_detail_dict["error_info"] = (
                provider_details_dict[provider_name].error_info)
            the_provider_detail_dict["error_message"] = (
                provider_details_dict[provider_name].error_message)
            arg_provider_details_dict[provider_name] = json.dumps(
                the_provider_detail_dict)
        provider_details_args = {
            'project_id': self.project_id,
            'service_id': self.service_id,
            'provider_details': arg_provider_details_dict
        }
        status_args = {
            'status': status,
            'project_id': self.project_id,
            'service_id': self.service_id
        }
        # Verify the mock has been called with the correct arguments for
        # each of the two statement types we care about.
        def assert_mock_execute_args(*args):
            if args[0].query_string == services.CQL_UPDATE_PROVIDER_DETAILS:
                self.assertEqual(args[1], provider_details_args)
            elif args[0].query_string == services.CQL_SET_SERVICE_STATUS:
                self.assertEqual(args[1], status_args)
        self.mock_session.execute.side_effect = assert_mock_execute_args
        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Pre-existing details are minimal (no access urls), so this
            # update has no stale provider URLs to clean up.
            mock_provider_det.return_value = {
                "MaxCDN":
                    provider_details.ProviderDetail(
                        provider_service_id='{}',
                        access_urls=[]
                    )
            }
            self.sc.update_provider_details(
                self.project_id,
                self.service_id,
                provider_details_dict
            )
    @ddt.file_data('data_provider_details.json')
    def test_update_provider_details_domain_deleted(
            self,
            provider_details_json,
    ):
        """When the new details no longer include a domain that the stored
        details served, the stale provider URL row must be deleted.
        """
        # Build ProviderDetail objects from the JSON fixture data.
        provider_details_dict = {}
        for k, v in provider_details_json.items():
            provider_detail_dict = json.loads(v)
            provider_details_dict[k] = provider_details.ProviderDetail(
                provider_service_id=(
                    provider_detail_dict["id"]),
                access_urls=provider_detail_dict["access_urls"],
                domains_certificate_status=provider_detail_dict.get(
                    "domains_certificate_status", {}))
        # Mock the response from cassandra.
        self.mock_session.execute.return_value = None
        # Re-serialize the fixture the same way the controller is expected
        # to (this is for update_provider_details unittest code coverage).
        arg_provider_details_dict = {}
        status = None
        for provider_name in provider_details_dict:
            the_provider_detail_dict = collections.OrderedDict()
            the_provider_detail_dict["id"] = (
                provider_details_dict[provider_name].provider_service_id)
            the_provider_detail_dict["access_urls"] = (
                provider_details_dict[provider_name].access_urls)
            the_provider_detail_dict["status"] = (
                provider_details_dict[provider_name].status)
            status = the_provider_detail_dict["status"]
            the_provider_detail_dict["name"] = (
                provider_details_dict[provider_name].name)
            the_provider_detail_dict["domains_certificate_status"] = (
                provider_details_dict[provider_name].
                domains_certificate_status.to_dict())
            the_provider_detail_dict["error_info"] = (
                provider_details_dict[provider_name].error_info)
            the_provider_detail_dict["error_message"] = (
                provider_details_dict[provider_name].error_message)
            arg_provider_details_dict[provider_name] = json.dumps(
                the_provider_detail_dict)
        provider_details_args = {
            'project_id': self.project_id,
            'service_id': self.service_id,
            'provider_details': arg_provider_details_dict
        }
        status_args = {
            'status': status,
            'project_id': self.project_id,
            'service_id': self.service_id
        }
        # Verify the mock has been called with the correct arguments.
        def assert_mock_execute_args(*args):
            if args[0].query_string == services.CQL_UPDATE_PROVIDER_DETAILS:
                self.assertEqual(args[1], provider_details_args)
            elif args[0].query_string == services.CQL_SET_SERVICE_STATUS:
                self.assertEqual(args[1], status_args)
        self.mock_session.execute.side_effect = assert_mock_execute_args
        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Stored details serve domain 'xk2.cd'; the fixture's new
            # details do not, so its provider URL row becomes stale.
            mock_provider_det.return_value = {
                "MaxCDN": provider_details.ProviderDetail(
                    provider_service_id=(
                        "{\"id\": 11942, \"access_urls\": "
                        "[{\"provider_url\": \"maxcdn.provider.com\", "
                        "\"domain\": \"xk2.cd\"}], "
                        "\"domains_certificate_status\":"
                        "{\"mypullzone.com\": "
                        "\"failed\"} }"
                    ),
                    access_urls=[
                        {
                            "provider_url": "fastly.provider.com",
                            "domain": "xk2.cd"
                        }
                    ]
                )
            }
            self.sc.update_provider_details(
                self.project_id,
                self.service_id,
                provider_details_dict
            )
        # Collect every CQL_DELETE_PROVIDER_URL statement that was
        # executed and the domain each one removed.
        delete_queries = []
        deleted_domains = []
        for query_mock_call in self.sc.session.execute.mock_calls:
            name, args, kwargs = query_mock_call
            for arg in args:
                if hasattr(arg, 'query_string'):
                    if (
                        arg.query_string ==
                        services.CQL_DELETE_PROVIDER_URL
                    ):
                        delete_queries.append(query_mock_call)
                        _, delete_query_args = args
                        deleted_domains.append(
                            delete_query_args["domain_name"])
        self.assertEqual(1, len(delete_queries))
        self.assertEqual(['xk2.cd'], deleted_domains)
        self.assertTrue(self.sc.session.execute.called)
    def test_update_provider_details_new_provider_details_empty(self):
        """Passing empty new provider details must still delete the
        provider URL rows belonging to the previously stored details.
        """
        provider_details_dict = {}
        # Mock the response from cassandra.
        self.mock_session.execute.return_value = None
        # With no new details, the serialized update arguments are empty
        # (this is for update_provider_details unittest code coverage).
        arg_provider_details_dict = {}
        status = None
        provider_details_args = {
            'project_id': self.project_id,
            'service_id': self.service_id,
            'provider_details': arg_provider_details_dict
        }
        status_args = {
            'status': status,
            'project_id': self.project_id,
            'service_id': self.service_id
        }
        # Verify the mock has been called with the correct arguments.
        def assert_mock_execute_args(*args):
            if args[0].query_string == services.CQL_UPDATE_PROVIDER_DETAILS:
                self.assertEqual(args[1], provider_details_args)
            elif args[0].query_string == services.CQL_SET_SERVICE_STATUS:
                self.assertEqual(args[1], status_args)
        self.mock_session.execute.side_effect = assert_mock_execute_args
        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Stored details still serve domain 'xk2.cd'.
            mock_provider_det.return_value = {
                "MaxCDN": provider_details.ProviderDetail(
                    provider_service_id=(
                        "{\"id\": 11942, \"access_urls\": "
                        "[{\"provider_url\": \"maxcdn.provider.com\", "
                        "\"domain\": \"xk2.cd\"}], "
                        "\"domains_certificate_status\":"
                        "{\"mypullzone.com\": "
                        "\"failed\"} }"
                    ),
                    access_urls=[
                        {
                            "provider_url": "fastly.provider.com",
                            "domain": "xk2.cd"
                        }
                    ]
                )
            }
            self.sc.update_provider_details(
                self.project_id,
                self.service_id,
                provider_details_dict
            )
        # Only the stale domain's provider URL row should be removed.
        delete_queries = []
        deleted_domains = []
        for query_mock_call in self.sc.session.execute.mock_calls:
            name, args, kwargs = query_mock_call
            for arg in args:
                if hasattr(arg, 'query_string'):
                    if (
                        arg.query_string ==
                        services.CQL_DELETE_PROVIDER_URL
                    ):
                        delete_queries.append(query_mock_call)
                        _, delete_query_args = args
                        deleted_domains.append(
                            delete_query_args["domain_name"])
        self.assertEqual(1, len(delete_queries))
        self.assertEqual(['xk2.cd'], deleted_domains)
        self.assertTrue(self.sc.session.execute.called)
def test_session(self):
session = self.sc.session
self.assertNotEqual(session, None)
def test_domain_exists_elsewhere_true(self):
self.mock_session.execute.return_value = [
{
'service_id': 'service_id',
'project_id': 'project_id',
'domain_name': 'domain_name'
}
]
self.assertTrue(
self.sc.domain_exists_elsewhere('domain_name', 'new_service_id'))
def test_domain_exists_elsewhere_false(self):
self.mock_session.execute.return_value = [
{
'service_id': 'service_id',
'project_id': 'project_id',
'domain_name': 'domain_name'
}
]
self.assertFalse(
self.sc.domain_exists_elsewhere('domain_name', 'service_id'))
def test_domain_exists_elsewhere_no_results(self):
self.mock_session.execute.return_value = []
self.assertFalse(
self.sc.domain_exists_elsewhere('domain_name', 'new_service_id'))
def test_domain_exists_elsewhere_value_error(self):
self.mock_session.execute.side_effect = ValueError(
'Mock -- Something went wrong!'
)
self.assertFalse(
self.sc.domain_exists_elsewhere('domain_name', 'new_service_id'))
def test_get_service_count_positive(self):
self.mock_session.execute.return_value = [
{
'count': 1
}
]
self.assertEqual(1, self.sc.get_service_count('project_id'))
@ddt.file_data('data_list_services.json')
def test_get_services_marker_not_none(self, data):
self.mock_session.execute.return_value = data
results = self.sc.get_services('project_id', uuid.uuid4(), 1)
self.assertEqual(data[0]["project_id"], results[0].project_id)
def test_get_services_by_status_positive(self):
self.mock_session.execute.return_value = [
{'service_id': 1},
{'service_id': 2},
{'service_id': 3}
]
self.assertEqual(
[
{'service_id': '1'},
{'service_id': '2'},
{'service_id': '3'}
],
self.sc.get_services_by_status('project_id')
)
def test_delete_services_by_status_positive(self):
try:
self.sc.delete_services_by_status(
'project_id', uuid.uuid4(), 'status'
)
except Exception as e:
self.fail(e)
def test_get_domains_by_provider_url_positive(self):
self.mock_session.execute.return_value = [
{'domain_name': 'www.xyz.com'},
]
self.assertEqual([{'domain_name': 'www.xyz.com'}],
self.sc.get_domains_by_provider_url('provider_url'))
def test_delete_provider_url_positive(self):
try:
self.sc.delete_provider_url('provider_url', 'domain_name')
except Exception as e:
self.fail(e)
def test_get_service_limit_positive(self):
self.mock_session.execute.return_value = [
{'project_limit': 999}
]
self.assertEqual(999, self.sc.get_service_limit('project_id'))
def test_get_service_limit_empty_result(self):
self.mock_session.execute.return_value = []
self.assertEqual(
self.sc._driver.max_services_conf.max_services_per_project,
self.sc.get_service_limit('project_id'))
def test_get_service_limit_value_error(self):
self.mock_session.execute.side_effect = ValueError(
'Mock -- Something went wrong!'
)
self.assertEqual(
self.sc._driver.max_services_conf.max_services_per_project,
self.sc.get_service_limit('project_id')
)
def test_set_service_limit_positive(self):
try:
self.sc.set_service_limit('project_id', 'project_limit')
except Exception as e:
self.fail(e)
@ddt.file_data('data_list_services.json')
def test_get_service_details_by_domain_name(self, data):
service_id = uuid.uuid4()
self.mock_session.execute.side_effect = [
[{
'project_id': 'project_id',
'service_id': service_id,
'domain_name': 'domain_name'
}],
[data[0]]
]
results = self.sc.get_service_details_by_domain_name('domain_name')
self.assertEqual(data[0]["project_id"], results.project_id)
@ddt.file_data('data_list_services.json')
def test_get_service_details_by_domain_name_domain_not_present(
self, data):
self.mock_session.execute.side_effect = [
[{
'project_id': 'proj_id', # differs from arg to func
'service_id': uuid.uuid4(),
'domain_name': 'domain_name'
}],
[data[0]]
]
with testtools.ExpectedException(ValueError):
self.sc.get_service_details_by_domain_name(
'domain_name',
project_id='project_id'
)
    @ddt.file_data('data_provider_details.json')
    def test_set_service_provider_details(self, data):
        """Setting provider details flips the service status first, then
        rewrites the stored provider-details column.
        """
        service_id = uuid.uuid4()
        # Only the provider-details read returns rows; every other
        # statement gets None back.
        def mock_execute_side_effect(*args):
            if args[0].query_string == services.CQL_GET_PROVIDER_DETAILS:
                return [{'provider_details': data}]
            else:
                return None
        self.mock_session.execute.side_effect = mock_execute_side_effect
        self.sc.set_service_provider_details(
            'project_id', service_id, 'deployed'
        )
        # The controller is expected to issue exactly nine execute()
        # calls; only the first, second and fourth are asserted on here.
        [
            update_service_status,
            get_provider_details,
            _,
            update_provider_details,
            _,
            _,
            _,
            _,
            _,
        ] = self.mock_session.execute.mock_calls
        self.assertEqual(services.CQL_SET_SERVICE_STATUS,
                         update_service_status[1][0].query_string)
        self.assertEqual(services.CQL_GET_PROVIDER_DETAILS,
                         get_provider_details[1][0].query_string)
        self.assertEqual(services.CQL_UPDATE_PROVIDER_DETAILS,
                         update_provider_details[1][0].query_string)
| apache-2.0 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/numpy/polynomial/hermite_e.py | 23 | 58014 | """
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
# Public API of this module.
__all__ = [
    'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
    'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
    'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
    'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
    'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
    'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
    'hermegauss', 'hermeweight']
# Trimming a HermiteE series reduces to generic coefficient trimming.
hermetrim = pu.trimcoef
def poly2herme(pol):
    """
    Convert a polynomial to a Hermite series.

    Convert an array of "standard" polynomial coefficients, ordered from
    lowest degree to highest, into the coefficients of the equivalent
    HermiteE series, ordered from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herme2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import poly2herme
    >>> poly2herme(np.arange(4))
    array([  2.,  10.,   2.,   3.])

    """
    [pol] = pu.as_series([pol])
    # Horner's scheme carried out in the HermiteE basis: multiply the
    # running result by x and add the next-lower polynomial coefficient.
    result = 0
    for coef in reversed(pol):
        result = hermeadd(hermemulx(result), coef)
    return result
def herme2poly(c):
    """
    Convert a Hermite series to a polynomial.

    Convert an array of HermiteE series coefficients, ordered from lowest
    degree to highest, into the coefficients of the equivalent polynomial
    (relative to the "standard" basis), ordered from lowest to highest
    degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Hermite series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2herme

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import herme2poly
    >>> herme2poly([ 2., 10., 2., 3.])
    array([ 0.,  1.,  2.,  3.])

    """
    from .polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    # Degree zero and one series are already polynomials.
    if n < 3:
        return c
    # Clenshaw-style downward recurrence using He_{i+1} = x*He_i - i*He_{i-1}.
    c0 = c[-2]
    c1 = c[-1]
    for deg in range(n - 1, 1, -1):
        c0, c1 = (polysub(c[deg - 2], c1*(deg - 1)),
                  polyadd(c0, polymulx(c1)))
    return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# HermiteE series default domain.
hermedomain = np.array([-1, 1])
# HermiteE series coefficients representing zero.
hermezero = np.array([0])
# HermiteE series coefficients representing one.
hermeone = np.array([1])
# HermiteE series coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl):
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
    >>> hermeval(0,hermeline(3, 2))
    3.0
    >>> hermeval(1,hermeline(3, 2))
    5.0

    """
    # He_0 = 1 and He_1 = x, so the offset and slope map directly onto
    # coefficients; a zero slope collapses to a constant series.
    coefs = [off] if scl == 0 else [off, scl]
    return np.array(coefs)
def hermefromroots(roots):
    """
    Generate a HermiteE series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in HermiteE form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    The roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in HermiteE form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, hermfromroots, chebfromroots.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
    >>> coef = hermefromroots((-1, 0, 1))
    >>> hermeval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])

    """
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # One linear factor (x - r) per root, each in HermiteE form.
    factors = [hermeline(-r, 1) for r in roots]
    # Multiply the factors pairwise until one series remains; the
    # balanced reduction keeps intermediate degrees small.
    count = len(factors)
    while count > 1:
        half, odd = divmod(count, 2)
        paired = [hermemul(factors[j], factors[j + half])
                  for j in range(half)]
        if odd:
            paired[0] = hermemul(paired[0], factors[-1])
        factors = paired
        count = half
    return factors[0]
def hermeadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermesub, hermemul, hermediv, hermepow

    Notes
    -----
    The sum of two Hermite series is a Hermite series (no "reprojection"
    onto the basis set is needed), so addition, just like that of
    "standard" polynomials, is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeadd
    >>> hermeadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])

    """
    [c1, c2] = pu.as_series([c1, c2])
    # Ensure c1 is the longer (or equal) series, then add in place.
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def hermesub(c1, c2):
    """
    Subtract one Hermite series from another.

    Returns the difference of two Hermite series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their difference.

    See Also
    --------
    hermeadd, hermemul, hermediv, hermepow

    Notes
    -----
    The difference of two Hermite series is a Hermite series (no
    "reprojection" onto the basis set is needed), so subtraction, just
    like that of "standard" polynomials, is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermesub
    >>> hermesub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])

    """
    [c1, c2] = pu.as_series([c1, c2])
    # Subtract into whichever buffer is long enough to hold the result.
    if len(c1) >= len(c2):
        c1[:c2.size] -= c2
        result = c1
    else:
        result = -c2
        result[:c1.size] += c1
    return pu.trimseq(result)
def hermemulx(c):
    """Multiply a Hermite series by x.

    Multiply the Hermite series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Hermite
    polynomials in the form

    .. math::

        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x)))

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermemulx
    >>> hermemulx([1, 2, 3])
    array([ 2.,  7.,  2.,  3.])

    """
    [c] = pu.as_series([c])
    # The zero series maps to itself.
    if len(c) == 1 and c[0] == 0:
        return c
    # x*He_k = He_{k+1} + k*He_{k-1}: each coefficient shifts up one
    # degree and also feeds k times itself back one degree down.
    prod = np.zeros(len(c) + 1, dtype=c.dtype)
    prod[1] = c[0]
    for k in range(1, len(c)):
        prod[k + 1] = c[k]
        prod[k - 1] += c[k]*k
    return prod
def hermemul(c1, c2):
    """
    Multiply one Hermite series by another.

    Returns the product of two Hermite series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their product.

    See Also
    --------
    hermeadd, hermesub, hermediv, hermepow

    Notes
    -----
    The (polynomial) product of two C-series generally has terms outside
    the Hermite basis set, so the product must be "reprojected" onto that
    basis, which may produce "unintuitive" (but correct) results.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermemul
    >>> hermemul([1, 2, 3], [0, 1, 2])
    array([ 14.,  15.,  28.,   7.,   6.])

    """
    [c1, c2] = pu.as_series([c1, c2])
    # Expand the shorter series in the HermiteE basis, scaled by the
    # longer one, via a Clenshaw-style downward recurrence.
    if len(c1) > len(c2):
        short, xs = c2, c1
    else:
        short, xs = c1, c2
    if len(short) == 1:
        c0 = short[0]*xs
        c1 = 0
    elif len(short) == 2:
        c0 = short[0]*xs
        c1 = short[1]*xs
    else:
        nd = len(short)
        c0 = short[-2]*xs
        c1 = short[-1]*xs
        for k in range(3, len(short) + 1):
            tmp = c0
            nd = nd - 1
            c0 = hermesub(short[-k]*xs, c1*(nd - 1))
            c1 = hermeadd(tmp, hermemulx(c1))
    return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
    """
    Divide one Hermite series by another.

    Returns the quotient-with-remainder of two Hermite series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Hermite series coefficients representing the quotient and
        remainder.

    See Also
    --------
    hermeadd, hermesub, hermemul, hermepow

    Notes
    -----
    The (polynomial) division of one Hermite series by another generally
    yields quotient and remainder terms outside the Hermite basis set, so
    the results must be "reprojected" onto that basis, which may produce
    "unintuitive" (but correct) results.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermediv
    >>> hermediv([ 14.,  15.,  28.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))

    """
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()
    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2:
        # Dividend degree below divisor degree: quotient is zero.
        return c1[:1]*0, c1
    if lc2 == 1:
        # Division by a constant series.
        return c1/c2[-1], c1[:1]*0
    quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
    rem = c1
    # Classic long division, peeling off the leading coefficient one
    # degree at a time with products formed in the HermiteE basis.
    for deg in range(lc1 - lc2, -1, -1):
        shifted = hermemul([0]*deg + [1], c2)
        factor = rem[-1]/shifted[-1]
        rem = rem[:-1] - factor*shifted[:-1]
        quo[deg] = factor
    return quo, pu.trimseq(rem)
def hermepow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermemul(prd, c)
return prd
def hermeder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Hermite_e series.

    Returns the series coefficients `c` differentiated `m` times along
    `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable).

    Parameters
    ----------
    c : array_like
        Array of Hermite_e series coefficients. If `c` is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change
        of variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Hermite series of the derivative.

    See Also
    --------
    hermeint

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeder
    >>> hermeder([ 1.,  1.,  1.,  1.])
    array([ 1.,  2.,  3.])

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients so in-place scaling works.
        c = c.astype(np.double)
    cnt = int(m)
    iaxis = int(axis)
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        # Differentiating at least as many times as there are
        # coefficients annihilates the series.
        return c[:1]*0
    for _ in range(cnt):
        n -= 1
        c *= scl
        der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
        # d/dx He_j = j * He_{j-1}
        for j in range(n, 0, -1):
            der[j - 1] = j*c[j]
        c = der
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Hermite_e series.

    Returns the Hermite_e series coefficients `c` integrated `m` times
    from `lbnd` along `axis`.  At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable:
    depending on what one is doing, one may want `scl` to be the
    reciprocal of what one might expect (see Notes).

    Parameters
    ----------
    c : array_like
        Array of Hermite_e series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []``
        (the default), all constants are set to zero.  If ``m == 1``, a
        single scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Hermite_e series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    hermeder

    Notes
    -----
    For a linear change of variable :math:`u = ax + b` in an integral
    relative to `x`, :math:`dx = du/a`, so `scl` should be set to
    :math:`1/a`.  Also, the result of integrating a C-series generally
    needs to be "reprojected" onto the C-series basis set, so the output
    may look "unintuitive", albeit correct.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeint
    >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
    array([ 1.,  1.,  1.,  1.])
    >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
    array([ 2.,  1.,  1.,  1.])

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients so in-place scaling works.
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt = int(m)
    iaxis = int(axis)
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    c = np.rollaxis(c, iaxis)
    # Pad the constants with zeros so there is one per integration.
    k = list(k) + [0]*(cnt - len(k))
    for idx in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # The zero series integrates to the integration constant.
            c[0] += k[idx]
        else:
            integ = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            integ[0] = c[0]*0
            integ[1] = c[0]
            # The antiderivative of He_j is He_{j+1}/(j + 1).
            for j in range(1, n):
                integ[j + 1] = c[j]/(j + 1)
            # Shift by a constant so the integral equals k[idx] at lbnd.
            integ[0] += k[idx] - hermeval(lbnd, integ)
            c = integ
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermeval(x, c, tensor=True):
    """
    Evaluate an HermiteE series at points x.

    For coefficients ``c`` of length ``n + 1`` this computes

    .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)

    using Clenshaw recursion (synthetic division) rather than summing the
    basis polynomials directly.

    Parameters
    ----------
    x : array_like, compatible object
        Evaluation points. A list or tuple is converted to an ndarray;
        anything else is used as-is, so scalars and ndarrays both work.
        `x` or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
    c : array_like
        Coefficients ordered so that ``c[n]`` holds the coefficients for
        terms of degree ``n``. A multidimensional `c` represents several
        series; the remaining indices enumerate them. Trailing zeros are
        used in the evaluation, so trim them if efficiency matters.
    tensor : boolean, optional
        If True (the default) ones are appended to the coefficient array's
        shape, one per dimension of `x`, so every column of coefficients is
        evaluated at every point of `x`. If False, `x` is broadcast over
        the columns of `c` instead. The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        If `c` is 1-D the result has the shape of `x`; otherwise the shape
        is ``c.shape[1:] + x.shape`` when `tensor` is true, and
        ``c.shape[1:]`` when it is false.

    See Also
    --------
    hermeval2d, hermegrid2d, hermeval3d, hermegrid3d

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeval
    >>> hermeval(1, [1, 2, 3])
    3.0
    >>> hermeval([[1, 2], [3, 4]], [1, 2, 3])
    array([[  3.,  14.],
           [ 31.,  54.]])
    """
    c = np.array(c, ndmin=1, copy=0)
    # Promote bool/integer coefficient arrays to double before recursing.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append one axis per dimension of x so broadcasting evaluates
        # every coefficient column at every point.
        c = c.reshape(c.shape + (1,)*x.ndim)

    ncoef = len(c)
    if ncoef == 1:
        b0, b1 = c[0], 0
    elif ncoef == 2:
        b0, b1 = c[0], c[1]
    else:
        # Clenshaw downward recurrence based on
        # He_{k+1}(x) = x*He_k(x) - k*He_{k-1}(x).
        deg = ncoef
        b0, b1 = c[-2], c[-1]
        for k in range(3, ncoef + 1):
            hold = b0
            deg = deg - 1
            b0 = c[-k] - b1*(deg - 1)
            b1 = hold + b1*x
    return b0 + b1*x
def hermeval2d(x, y, c):
    """
    Evaluate a 2-D HermiteE series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed with
        pairs of corresponding values from `x` and `y`.

    Raises
    ------
    ValueError
        If `x` and `y` cannot be combined into a single array.

    See Also
    --------
    hermeval, hermegrid2d, hermeval3d, hermegrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        # Catch only ordinary conversion failures; a bare ``except:`` would
        # also swallow SystemExit and KeyboardInterrupt.
        raise ValueError('x, y are incompatible')

    # Evaluate along x first, then collapse along y; tensor=False broadcasts
    # the x-result over the remaining coefficient axis.
    c = hermeval(x, c)
    c = hermeval(y, c, tensor=False)
    return c
def hermegrid2d(x, y, c):
    """
    Evaluate a 2-D HermiteE series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * He_i(a) * He_j(b)

    where the points ``(a, b)`` consist of all pairs formed by taking `a`
    from `x` and `b` from `y`. The resulting points form a grid with `x`
    in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be
    ``c.shape[2:] + x.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    hermeval, hermeval2d, hermeval3d, hermegrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Each hermeval call (with tensor=True) appends the new point axes,
    # so composing them builds the full Cartesian-product grid.
    return hermeval(y, hermeval(x, c))
def hermeval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite_e series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    Raises
    ------
    ValueError
        If `x`, `y`, and `z` cannot be combined into a single array.

    See Also
    --------
    hermeval, hermeval2d, hermegrid2d, hermegrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        # Catch only ordinary conversion failures; a bare ``except:`` would
        # also swallow SystemExit and KeyboardInterrupt.
        raise ValueError('x, y, z are incompatible')

    # Evaluate one axis at a time; tensor=False keeps the point axes of the
    # previous evaluation aligned instead of appending new ones.
    c = hermeval(x, c)
    c = hermeval(y, c, tensor=False)
    c = hermeval(z, c, tensor=False)
    return c
def hermegrid3d(x, y, z, c):
    """
    Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)

    where the points ``(a, b, c)`` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be
    ``c.shape[3:] + x.shape + y.shape + z.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the series at the points in the Cartesian product of
        `x`, `y`, and `z`.

    See Also
    --------
    hermeval, hermeval2d, hermegrid2d, hermeval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Successive tensor-mode evaluations append one point axis per call,
    # producing the full x-by-y-by-z grid.
    return hermeval(z, hermeval(y, hermeval(x, c)))
def hermevander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = He_i(x),

    where ``0 <= i <= deg``. The leading indices of `V` index the elements
    of `x` and the last index is the degree of the HermiteE polynomial.

    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is
    the array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
    ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of HermiteE series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
        corresponding HermiteE polynomial. The dtype will be the same as
        the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermevander
    >>> x = np.array([-1, 0, 1])
    >>> hermevander(x, 3)
    array([[ 1., -1.,  0.,  2.],
           [ 1.,  0., -1., -0.],
           [ 1.,  1.,  0., -2.]])
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # Force at least 1-D and promote to floating point.
    x = np.array(x, copy=0, ndmin=1) + 0.0
    van = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    van[0] = x*0 + 1
    if ideg > 0:
        van[1] = x
        for k in range(2, ideg + 1):
            # Three-term recurrence: He_k(x) = x*He_{k-1}(x) - (k-1)*He_{k-2}(x)
            van[k] = van[k-1]*x - van[k-2]*(k - 1)
    # Degree axis goes last: leading axes index the sample points.
    return np.moveaxis(van, 0, -1)
def hermevander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading
    indices of `V` index the points ``(x, y)`` and the last index encodes
    the degrees of the HermiteE polynomials.

    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    ``(xdeg + 1, ydeg + 1)`` in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D HermiteE
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form ``[x_deg, y_deg]``.

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    hermevander, hermevander3d, hermeval2d, hermeval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    # Each entry must round-trip through int() unchanged and be >= 0,
    # and exactly two degrees must be supplied.
    if len(ideg) != 2 or any(i != d or i < 0 for i, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg
    x, y = np.array((x, y), copy=0) + 0.0

    vx = hermevander(x, degx)
    vy = hermevander(y, degy)
    # Outer product over the two degree axes, then flatten them into one.
    v = vx[..., :, None]*vy[..., None, :]
    return v.reshape(v.shape[:-2] + (-1,))
def hermevander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in
    `x`, `y`, `z`, then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The
    leading indices of `V` index the points ``(x, y, z)`` and the last
    index encodes the degrees of the HermiteE polynomials.

    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape ``(xdeg + 1, ydeg + 1, zdeg + 1)`` in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D HermiteE
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form ``[x_deg, y_deg, z_deg]``.

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    hermevander, hermevander2d, hermeval2d, hermeval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    # Each entry must round-trip through int() unchanged and be >= 0,
    # and exactly three degrees must be supplied.
    if len(ideg) != 3 or any(i != d or i < 0 for i, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = hermevander(x, degx)
    vy = hermevander(y, degy)
    vz = hermevander(z, degz)
    # Triple outer product over the degree axes, then flatten them into one.
    v = vx[..., :, None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Return the coefficients of a HermiteE series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. For Numpy versions >= 1.11 a list of integers specifying the
        degrees of the terms to include may be used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance. The default value is None.

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Hermite coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k  of `y` are in column
        `k`.

    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.  The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebfit, legfit, polyfit, hermfit, polyfit
    hermeval : Evaluates a Hermite series.
    hermevander : pseudo Vandermonde matrix of Hermite series.
    hermeweight : HermiteE weight function.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the HermiteE series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. This problem is solved by
    setting up the (typically) overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
    are the coefficients to be solved for, and the elements of `y` are the
    observed values.  This equation is then solved using the singular value
    decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning.  The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.

    Fits using HermiteE series are probably most useful when the data can
    be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i])`` should be used
    together with data values ``y[i]/sqrt(w(x[i])``. The weight function is
    available as `hermeweight`.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
    >>> x = np.linspace(-10, 10)
    >>> err = np.random.randn(len(x))/10
    >>> y = hermeval(x, [1, 2, 3]) + err
    >>> hermefit(x, y, 2)
    array([ 1.01690445,  1.99951418,  2.99948696])
    """
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)

    # check arguments: deg may be a scalar degree or a 1-D list of terms.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    if deg.ndim == 0:
        # Scalar degree: fit all terms 0..deg.
        lmax = deg
        order = lmax + 1
        van = hermevander(x, lmax)
    else:
        # Explicit term list: build the full Vandermonde matrix up to the
        # largest degree, then keep only the requested columns.
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = hermevander(x, lmax)[:, deg]

    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns and scale by them to
    # improve the conditioning of the least squares problem.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1  # avoid division by zero for all-zero columns

    # Solve the least squares problem, then undo the column scaling.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # Expand c to include non-fitted coefficients which are set to zero
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax+1, dtype=c.dtype)
        cc[deg] = c
        c = cc

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def hermecompanion(c):
    """
    Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is an HermiteE basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of HermiteE series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Raises
    ------
    ValueError
        If `c` has fewer than two (trimmed) coefficients, i.e. degree < 1.

    Notes
    -----
    .. versionadded::1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree 1: the companion "matrix" is the single root -c0/c1.
        return np.array([[-c[0]/c[1]]])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Cumulative scaling factors 1/sqrt((n-1)!), ..., 1 that symmetrize the
    # companion matrix (applied to the last column below).
    scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
    scl = np.multiply.accumulate(scl)[::-1]
    # Flat views onto the super- and subdiagonal of mat.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    # Symmetric off-diagonals: sqrt(1), sqrt(2), ..., sqrt(n-1).
    top[...] = np.sqrt(np.arange(1, n))
    bot[...] = top
    # Fold the (scaled) series coefficients into the last column.
    mat[:, -1] -= scl*c[:-1]/c[-1]
    return mat
def hermeroots(c):
    """
    Compute the roots of a HermiteE series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * He_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, hermroots, chebroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
    errors due to the numerical instability of the series for such values.
    Roots with multiplicity greater than 1 will also show larger errors as
    the value of the series near such points is relatively insensitive to
    errors in the roots. Isolated roots near the origin can be improved by
    a few iterations of Newton's method.

    The HermiteE series basis polynomials aren't powers of `x` so the
    results of this function may seem unintuitive.

    Examples
    --------
    >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
    >>> coef = hermefromroots([-1, 0, 1])
    >>> coef
    array([ 0.,  2.,  0.,  1.])
    >>> hermeroots(coef)
    array([-1.,  0.,  1.])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) <= 1:
        # Constant series: no roots.
        return np.array([], dtype=c.dtype)
    if len(c) == 2:
        # Linear series: single root in closed form.
        return np.array([-c[0]/c[1]])

    # General case: roots are the eigenvalues of the companion matrix.
    roots = la.eigvals(hermecompanion(c))
    roots.sort()
    return roots
def _normed_hermite_e_n(x, n):
"""
Evaluate a normalized HermiteE polynomial.
Compute the value of the normalized HermiteE polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized HermiteE function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard HermiteE functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(1./nd)
nd = nd - 1.0
return c0 + c1*x
def hermegauss(deg):
    """
    Gauss-HermiteE quadrature.

    Computes the sample points and weights for Gauss-HermiteE quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x^2/2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Raises
    ------
    ValueError
        If `deg` is not an integer or is < 1.

    Notes
    -----
    .. versionadded::1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`He_n`, and then scaling the results to get
    the right value when integrating 1.
    """
    ideg = int(deg)
    # NOTE(review): the message says "non-negative" but the actual
    # requirement enforced here is deg >= 1.
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a non-negative integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])  # coefficients of the basis polynomial He_deg
    m = hermecompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton: x -= f(x)/f'(x), using
    # He'_n(x) = n*He_{n-1}(x) (sqrt(n) after normalization).
    dy = _normed_hermite_e_n(x, ideg)
    df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_e_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite_e we can also symmetrize: the true nodes/weights are
    # symmetric about 0, so averaging removes asymmetric numerical error.
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value: the weights must sum to integral of
    # the weight function, sqrt(2*pi).
    w *= np.sqrt(2*np.pi) / w.sum()

    return x, w
def hermeweight(x):
    r"""Weight function of the Hermite_e polynomials.

    The weight function is :math:`\exp(-x^2/2)` and the interval of
    integration is :math:`[-\inf, \inf]`. The HermiteE polynomials are
    orthogonal, but not normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded::1.7.0
    """
    return np.exp(-0.5*x**2)
#
# HermiteE series class
#
class HermiteE(ABCPolyBase):
    """An HermiteE series class.

    The HermiteE class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        HermiteE coefficients in order of increasing degree, i.e,
        ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0

    """
    # Virtual Functions: the flat module-level functions that implement the
    # arithmetic and analysis for this basis; ABCPolyBase dispatches to them.
    _add = staticmethod(hermeadd)
    _sub = staticmethod(hermesub)
    _mul = staticmethod(hermemul)
    _div = staticmethod(hermediv)
    _pow = staticmethod(hermepow)
    _val = staticmethod(hermeval)
    _int = staticmethod(hermeint)
    _der = staticmethod(hermeder)
    _fit = staticmethod(hermefit)
    _line = staticmethod(hermeline)
    _roots = staticmethod(hermeroots)
    _fromroots = staticmethod(hermefromroots)

    # Virtual properties: both domain and window default to hermedomain,
    # i.e. [-1, 1], so the identity map is used by default.
    nickname = 'herme'
    domain = np.array(hermedomain)
    window = np.array(hermedomain)
| gpl-3.0 |
theshteves/tweet-the-wolf | pips/oauthlib/oauth2/rfc6749/request_validator.py | 33 | 19622 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.request_validator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import logging
log = logging.getLogger(__name__)
class RequestValidator(object):
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Refresh Token Grant
.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
"""
return True
    def authenticate_client(self, request, *args, **kwargs):
        """Authenticate client through means outside the OAuth 2 spec.

        Means of authentication is negotiated beforehand and may for example
        be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
        header.

        Headers may be accessed through request.headers and parameters found in
        both body and query can be obtained by direct attribute access, i.e.
        request.client_id for client_id in the URL query.

        :param request: oauthlib.common.Request
        :rtype: True or False
        :raises NotImplementedError: always; concrete validators must
            override this method.

        Method is used by:
            - Authorization Code Grant
            - Resource Owner Password Credentials Grant (may be disabled)
            - Client Credentials Grant
            - Refresh Token Grant

        .. _`HTTP Basic Authentication Scheme`: http://tools.ietf.org/html/rfc1945#section-11.1
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def authenticate_client_id(self, client_id, request, *args, **kwargs):
        """Ensure client_id belong to a non-confidential client.

        A non-confidential client is one that is not required to authenticate
        through other means, such as using HTTP Basic.

        Note, while not strictly necessary it can often be very convenient
        to set request.client to the client object associated with the
        given client_id.

        :param client_id: Unicode client identifier
        :param request: oauthlib.common.Request
        :rtype: True or False
        :raises NotImplementedError: always; concrete validators must
            override this method.

        Method is used by:
            - Authorization Code Grant
        """
        raise NotImplementedError('Subclasses must implement this method.')
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
*args, **kwargs):
"""Ensure that the authorization process represented by this authorization
code began with this 'redirect_uri'.
If the client specifies a redirect_uri when obtaining code then that
redirect URI must be bound to the code and verified equal in this
method, according to RFC 6749 section 4.1.3. Do not compare against
the client's allowed redirect URIs, but against the URI used when the
code was saved.
:param client_id: Unicode client identifier
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client
:param refresh_token: Unicode refresh_token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Refresh token grant
"""
return False
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier
:param code: The authorization code grant (request.code).
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Revocation Endpoint
"""
raise NotImplementedError('Subclasses must implement this method.')
    def rotate_refresh_token(self, request):
        """Determine whether to rotate the refresh token. Default, yes.

        When access tokens are refreshed the old refresh token can be kept
        or replaced with a new one (rotated). Return True to rotate and
        False for keeping original.

        :param request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Refresh Token Grant
        """
        return True
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be stored with:
- the client_id (client_id)
- the redirect URI used (request.redirect_uri)
- a resource owner / user (request.user)
- the authorized scopes (request.scopes)
- the client state, if given (code.get('state'))
The 'code' argument is actually a dictionary, containing at least a
'code' key with the actual authorization code:
{'code': 'sdf345jsdf0934f'}
It may also have a 'state' key containing a nonce for the client, if it
chose to send one. That value should be saved and used in
'validate_code'.
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant and, optionally, state.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
    def save_bearer_token(self, token, request, *args, **kwargs):
        """Persist the Bearer token.

        The Bearer token should at minimum be associated with:
            - a client and its client_id, if available
            - a resource owner / user (request.user)
            - authorized scopes (request.scopes)
            - an expiration time
            - a refresh token, if issued

        The Bearer token dict may hold a number of items::

            {
                'token_type': 'Bearer',
                'access_token': 'askfjh234as9sd8',
                'expires_in': 3600,
                'scope': 'string of space separated authorized scopes',
                'refresh_token': '23sdf876234',  # if issued
                'state': 'given_by_client',  # if supplied by client
            }

        Note that while "scope" is a string-separated list of authorized scopes,
        the original list is still available in request.scopes

        :param token: A Bearer token dict
        :param request: The HTTP Request (oauthlib.common.Request)

        Method is used by all core grant types issuing Bearer tokens:
            - Authorization Code Grant
            - Implicit Grant
            - Resource Owner Password Credentials Grant (might not associate a client)
            - Client Credentials grant
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def validate_bearer_token(self, token, scopes, request):
        """Ensure the Bearer token is valid and authorized access to scopes.

        :param token: A string of random characters (the Bearer token).
        :param scopes: A list of scopes associated with the protected resource.
        :param request: The HTTP Request (oauthlib.common.Request)
        :rtype: True or False

        A key to OAuth 2 security and restricting impact of leaked tokens is
        the short expiration time of tokens, *always ensure the token has not
        expired!*.

        Two different approaches to scope validation:

            1) all(scopes). The token must be authorized access to all scopes
                            associated with the resource. For example, the
                            token has access to ``read-only`` and ``images``,
                            thus the client can view images but not upload new.
                            Allows for fine grained access control through
                            combining various scopes.

            2) any(scopes). The token must be authorized access to one of the
                            scopes associated with the resource. For example,
                            token has access to ``read-only-images``.
                            Allows for fine grained, although arguably less
                            convenient, access control.

        A powerful way to use scopes would mimic UNIX ACLs and see a scope
        as a group with certain privileges. For a restful API these might
        map to HTTP verbs instead of read, write and execute.

        Note, the request.user attribute can be set to the resource owner
        associated with this token. Similarly the request.client and
        request.scopes attribute can be set to associated client object
        and authorized scopes. If you then use a decorator such as the
        one provided for django these attributes will be made available
        in all protected views as keyword arguments.

        Method is indirectly used by all core Bearer token issuing grant types:
            - Authorization Code Grant
            - Implicit Grant
            - Resource Owner Password Credentials Grant
            - Client Credentials Grant
        """
        raise NotImplementedError('Subclasses must implement this method.')
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Verify that the authorization_code is valid and assigned to the given
client.
Before returning true, set the following based on the information stored
with the code in 'save_authorization_code':
- request.user
- request.state (if given)
- request.scopes
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes
must also be set.
:param client_id: Unicode client identifier
:param code: Unicode authorization code
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the grant_type requested.
:param client_id: Unicode client identifier
:param grant_type: Unicode grant type, i.e. authorization_code, password.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param redirect_uri: Unicode absolute URI
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the response_type requested.
:param client_id: Unicode client identifier
:param response_type: Unicode response type, i.e. code, token.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Ensure the client is authorized access to requested scopes.
:param client_id: Unicode client identifier
:param scopes: List of scopes (defined by you)
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_user(self, username, password, client, request, *args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
persistance method used (commonly, save_bearer_token).
:param username: Unicode username
:param password: Unicode password
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
| mit |
theheros/kbengine | kbe/src/lib/python/Lib/idlelib/ScrolledList.py | 67 | 3997 | from tkinter import *
class ScrolledList:
    """A Listbox with an attached vertical Scrollbar plus basic
    click/keyboard/context-menu handling.

    Subclasses customize behaviour by overriding fill_menu(),
    on_select() and on_double().
    """

    # Placeholder text shown while the list holds no real items.
    default = "(None)"

    def __init__(self, master, **options):
        # Create top frame, with scrollbar and listbox
        self.master = master
        self.frame = frame = Frame(master)
        self.frame.pack(fill="both", expand=1)
        self.vbar = vbar = Scrollbar(frame, name="vbar")
        self.vbar.pack(side="right", fill="y")
        self.listbox = listbox = Listbox(frame, exportselection=0,
                                         background="white")
        if options:
            listbox.configure(options)
        listbox.pack(expand=1, fill="both")
        # Tie listbox and scrollbar together
        vbar["command"] = listbox.yview
        listbox["yscrollcommand"] = vbar.set
        # Bind events to the list box
        listbox.bind("<ButtonRelease-1>", self.click_event)
        listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
        listbox.bind("<ButtonPress-3>", self.popup_event)
        listbox.bind("<Key-Up>", self.up_event)
        listbox.bind("<Key-Down>", self.down_event)
        # Mark as empty
        self.clear()

    def close(self):
        """Destroy the widget tree rooted at the frame."""
        self.frame.destroy()

    def clear(self):
        """Remove all items and show the placeholder entry."""
        self.listbox.delete(0, "end")
        self.empty = 1
        self.listbox.insert("end", self.default)

    def append(self, item):
        """Append str(item); the placeholder is dropped on the first real item."""
        if self.empty:
            self.listbox.delete(0, "end")
            self.empty = 0
        self.listbox.insert("end", str(item))

    def get(self, index):
        """Return the text of the item at *index*."""
        return self.listbox.get(index)

    def click_event(self, event):
        # Select the item under the mouse and notify the subclass hook.
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        self.on_select(index)
        return "break"

    def double_click_event(self, event):
        # Fire the double-click hook for the currently active item.
        index = self.listbox.index("active")
        self.select(index)
        self.on_double(index)
        return "break"

    # Lazily created popup menu; built on first right-click (see make_menu).
    menu = None

    def popup_event(self, event):
        # Right-click: select the item under the mouse, then show the menu.
        if not self.menu:
            self.make_menu()
        menu = self.menu
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        menu.tk_popup(event.x_root, event.y_root)

    def make_menu(self):
        """Create the (empty) popup menu and let the subclass populate it."""
        menu = Menu(self.listbox, tearoff=0)
        self.menu = menu
        self.fill_menu()

    def up_event(self, event):
        # Move the selection one item up; from no selection, jump to the
        # last item. Ring the bell when already at the top.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index - 1
        else:
            index = self.listbox.size() - 1
        if index < 0:
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def down_event(self, event):
        # Move the selection one item down; from no selection, jump to the
        # first item. Ring the bell when already at the bottom.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index + 1
        else:
            index = 0
        if index >= self.listbox.size():
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def select(self, index):
        """Focus, activate and visually select the item at *index*."""
        self.listbox.focus_set()
        self.listbox.activate(index)
        self.listbox.selection_clear(0, "end")
        self.listbox.selection_set(index)
        self.listbox.see(index)

    # Methods to override for specific actions

    def fill_menu(self):
        """Populate self.menu with commands (override in subclasses)."""
        pass

    def on_select(self, index):
        """Called when an item is selected (override in subclasses)."""
        pass

    def on_double(self, index):
        """Called when an item is double-clicked (override in subclasses)."""
        pass
def test():
    """Build a small demo window exercising ScrolledList; return the root."""
    root = Tk()
    root.protocol("WM_DELETE_WINDOW", root.destroy)
    class MyScrolledList(ScrolledList):
        def fill_menu(self): self.menu.add_command(label="pass")
        def on_select(self, index): print("select", self.get(index))
        def on_double(self, index): print("double", self.get(index))
    s = MyScrolledList(root)
    for i in range(30):
        s.append("item %02d" % i)
    return root
def main():
    """Build the demo window and enter the Tk event loop."""
    test().mainloop()
if __name__ == '__main__':
main()
| lgpl-3.0 |
eugenejen/AutobahnPython | autobahn/autobahn/twisted/choosereactor.py | 34 | 3602 | ###############################################################################
##
## Copyright (C) 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['install_optimal_reactor','install_reactor']
def install_optimal_reactor():
    """
    Try to install the optimal Twisted reactor for platform.

    kqueue on BSD/Darwin, IOCP on Windows, epoll on Linux; any failure
    falls through to Twisted's default reactor selection.
    """
    import sys

    if 'bsd' in sys.platform or sys.platform.startswith('darwin'):
        try:
            # kqueue needs a sufficiently recent interpreter (see warning below).
            v = sys.version_info
            if v[0] == 1 or (v[0] == 2 and v[1] < 6) or (v[0] == 2 and v[1] == 6 and v[2] < 5):
                raise Exception("Python version too old (%s)" % sys.version)
            from twisted.internet import kqreactor
            kqreactor.install()
        except Exception as e:
            print("""
WARNING: Running on BSD or Darwin, but cannot use kqueue Twisted reactor.
=> %s
To use the kqueue Twisted reactor, you will need:
1. Python >= 2.6.5 or PyPy > 1.8
2. Twisted > 12.0
Note the use of >= and >.
Will let Twisted choose a default reactor (potential performance degradation).
""" % str(e))
            pass

    if sys.platform in ['win32']:
        try:
            from twisted.application.reactors import installReactor
            installReactor("iocp")
        except Exception as e:
            print("""
WARNING: Running on Windows, but cannot use IOCP Twisted reactor.
=> %s
Will let Twisted choose a default reactor (potential performance degradation).
""" % str(e))

    if sys.platform.startswith('linux'):
        try:
            from twisted.internet import epollreactor
            epollreactor.install()
        except Exception as e:
            print("""
WARNING: Running on Linux, but cannot use Epoll Twisted reactor.
=> %s
Will let Twisted choose a default reactor (potential performance degradation).
""" % str(e))
def install_reactor(explicitReactor = None, verbose = False):
    """
    Install Twisted reactor.

    :param explicitReactor: If provided, install this reactor. Else, install optimal reactor.
    :type explicitReactor: obj
    :param verbose: If `True`, print what happens.
    :type verbose: bool
    :returns: The installed Twisted reactor.
    """
    import sys

    if explicitReactor:
        ## install explicitly given reactor
        ##
        from twisted.application.reactors import installReactor
        print("Trying to install explicitly specified Twisted reactor '%s'" % explicitReactor)
        try:
            installReactor(explicitReactor)
        except Exception as e:
            # An unknown/unavailable reactor name is fatal.
            print("Could not install Twisted reactor %s%s" % (explicitReactor, ' ["%s"]' % e if verbose else ''))
            sys.exit(1)
    else:
        ## automatically choose optimal reactor
        ##
        if verbose:
            print("Automatically choosing optimal Twisted reactor")
        install_optimal_reactor()

    ## now the reactor is installed, import it
    from twisted.internet import reactor

    if verbose:
        from twisted.python.reflect import qual
        print("Running Twisted reactor %s" % qual(reactor.__class__))

    return reactor
| apache-2.0 |
gem5/linux-arm-gem5 | tools/perf/tests/attr.py | 1266 | 9424 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
    """Raised when a test described by a config file does not pass."""

    def __init__(self, test, msg):
        self.test = test
        self.msg = msg

    def getMsg(self):
        """Return a human readable description of the failure."""
        return "'%s' - %s" % (self.test.path, self.msg)
class Unsup(Exception):
    """Raised when a test cannot run because the feature is unsupported."""

    def __init__(self, test):
        self.test = test

    def getMsg(self):
        """Return the path of the unsupported test, quoted."""
        return "'%s'" % self.test.path
class Event(dict):
    """A perf_event_attr record read from a config-file section.

    Behaves as a dict of attr-field name -> expected value string;
    ``terms`` lists the fields that take part in comparison.
    NOTE: Python 2 code (uses dict.has_key).
    """

    # perf_event_attr fields compared between expected and actual events.
    terms = [
        'cpu',
        'flags',
        'type',
        'size',
        'config',
        'sample_period',
        'sample_type',
        'read_format',
        'disabled',
        'inherit',
        'pinned',
        'exclusive',
        'exclude_user',
        'exclude_kernel',
        'exclude_hv',
        'exclude_idle',
        'mmap',
        'comm',
        'freq',
        'inherit_stat',
        'enable_on_exec',
        'task',
        'watermark',
        'precise_ip',
        'mmap_data',
        'sample_id_all',
        'exclude_host',
        'exclude_guest',
        'exclude_callchain_kernel',
        'exclude_callchain_user',
        'wakeup_events',
        'bp_type',
        'config1',
        'config2',
        'branch_sample_type',
        'sample_regs_user',
        'sample_stack_user',
    ]

    def add(self, data):
        """Merge (key, value) pairs into this event."""
        for key, val in data:
            log.debug(" %s = %s" % (key, val))
            self[key] = val

    def __init__(self, name, data, base):
        log.debug(" Event %s" % name);
        self.name = name;
        self.group = ''
        # Parent ("base") values first, then section-specific overrides.
        self.add(base)
        self.add(data)

    def compare_data(self, a, b):
        """Compare two value strings; either side may hold alternatives."""
        # Allow multiple values in assignment separated by '|'
        a_list = a.split('|')
        b_list = b.split('|')
        for a_item in a_list:
            for b_item in b_list:
                if (a_item == b_item):
                    return True
                elif (a_item == '*') or (b_item == '*'):
                    # '*' acts as a wildcard matching any value.
                    return True
        return False

    def equal(self, other):
        """Return True when every term matches between self and other."""
        for t in Event.terms:
            log.debug(" [%s] %s %s" % (t, self[t], other[t]));
            if not self.has_key(t) or not other.has_key(t):
                return False
            if not self.compare_data(self[t], other[t]):
                return False
        return True

    def diff(self, other):
        """Log every term whose value differs between self and other."""
        for t in Event.terms:
            if not self.has_key(t) or not other.has_key(t):
                continue
            if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
class Test(object):
    """One perf-attr test: run the configured perf command and compare the
    perf_event_attr records it produced against the expected ones.

    The test file format is described in the comment block above.
    NOTE: Python 2 code (bare except, ConfigParser module, print-era style).
    """

    def __init__(self, path, options):
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        log.warning("running '%s'" % path)
        self.path = path
        self.test_dir = options.test_dir
        self.perf = options.perf
        self.command = parser.get('config', 'command')
        self.args = parser.get('config', 'args')
        try:
            self.ret = parser.get('config', 'ret')
        except:
            # 'ret' is optional; default to expecting exit status 0.
            self.ret = 0
        self.expect = {}
        self.result = {}
        log.debug(" loading expected events");
        self.load_events(path, self.expect)

    def is_event(self, name):
        """Return True for section names describing an event record."""
        if name.find("event") == -1:
            return False
        else:
            return True

    def load_events(self, path, events):
        """Parse all event sections of *path* into the *events* dict."""
        parser_event = ConfigParser.SafeConfigParser()
        parser_event.read(path)
        # The event record section header contains 'event' word,
        # optionaly followed by ':' allowing to load 'parent
        # event' first as a base
        for section in filter(self.is_event, parser_event.sections()):
            parser_items = parser_event.items(section);
            base_items = {}
            # Read parent event if there's any
            if (':' in section):
                base = section[section.index(':') + 1:]
                parser_base = ConfigParser.SafeConfigParser()
                parser_base.read(self.test_dir + '/' + base)
                base_items = parser_base.items('event')
            e = Event(section, parser_items, base_items)
            events[section] = e

    def run_cmd(self, tempdir):
        """Run the configured perf command; raise Unsup on an unexpected
        exit status (treated as lack of kernel/hardware support)."""
        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
              self.perf, self.command, tempdir, self.args)
        ret = os.WEXITSTATUS(os.system(cmd))
        log.info(" '%s' ret %d " % (cmd, ret))
        if ret != int(self.ret):
            raise Unsup(self)

    def compare(self, expect, result):
        """Match every event in *expect* against *result*; raise Fail when
        an event or its group relationship has no counterpart."""
        match = {}
        log.debug(" compare");
        # For each expected event find all matching
        # events in result. Fail if there's not any.
        for exp_name, exp_event in expect.items():
            exp_list = []
            log.debug(" matching [%s]" % exp_name)
            for res_name, res_event in result.items():
                log.debug(" to [%s]" % res_name)
                if (exp_event.equal(res_event)):
                    exp_list.append(res_name)
                    log.debug(" ->OK")
                else:
                    log.debug(" ->FAIL");
            log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not any matching event - fail
            if (not exp_list):
                exp_event.diff(res_event)
                raise Fail(self, 'match failure');
            match[exp_name] = exp_list
        # For each defined group in the expected events
        # check we match the same group in the result.
        for exp_name, exp_event in expect.items():
            group = exp_event.group
            if (group == ''):
                continue
            for res_name in match[exp_name]:
                res_group = result[res_name].group
                if res_group not in match[group]:
                    raise Fail(self, 'group failure')
                log.debug(" group: [%s] matches group leader %s" %
                          (exp_name, str(match[group])))
        log.debug(" matched")

    def resolve_groups(self, events):
        """Translate each event's numeric group_fd into the name of its
        group leader event (events with group_fd == -1 are leaders)."""
        for name, event in events.items():
            group_fd = event['group_fd'];
            if group_fd == '-1':
                continue;
            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break;

    def run(self):
        """Execute the test end to end inside a temporary directory."""
        tempdir = tempfile.mkdtemp();
        try:
            # run the test script
            self.run_cmd(tempdir);
            # load events expectation for the test
            log.debug(" loading result events");
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result);
            # resolve group_fd to event names
            self.resolve_groups(self.expect);
            self.resolve_groups(self.result);
            # do the expectation - results matching - both ways
            self.compare(self.expect, self.result)
            self.compare(self.result, self.expect)
        finally:
            # cleanup
            shutil.rmtree(tempdir)
def run_tests(options):
    """Run every test file under options.test_dir matching options.test."""
    for f in glob.glob(options.test_dir + '/' + options.test):
        try:
            Test(f, options).run()
        except Unsup, obj:
            # Unexpected perf exit status is treated as "unsupported on
            # this system" - report it without aborting the whole run.
            log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
    """Configure the module-level ``log`` logger from a verbosity count.

    :param verbose: number of -v flags given on the command line; optparse
                    leaves this at None when -v was never used.
                    None/0 -> CRITICAL, 1 -> WARNING, 2 -> INFO, >=3 -> DEBUG
    """
    global log
    # optparse 'count' options default to None; normalize so the
    # comparisons below are well defined.
    verbose = verbose or 0
    if verbose >= 3:
        level = logging.DEBUG
    elif verbose == 2:
        level = logging.INFO
    elif verbose == 1:
        level = logging.WARNING
    else:
        level = logging.CRITICAL
    log = logging.getLogger('test')
    log.setLevel(level)
    # Emit bare messages (no timestamps/levels) to the console.
    ch = logging.StreamHandler()
    ch.setLevel(level)
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)
    log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
    """Parse command line options and run the matching attr tests."""
    parser = optparse.OptionParser(usage=USAGE)
    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-d", "--test-dir",
                      action="store", type="string", dest="test_dir")
    parser.add_option("-p", "--perf",
                      action="store", type="string", dest="perf")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose")
    options, args = parser.parse_args()
    if args:
        parser.error('FAILED wrong arguments %s' % ' '.join(args))
        return -1
    setup_log(options.verbose)
    # The tests directory (-d) is mandatory.
    if not options.test_dir:
        print 'FAILED no -d option specified'
        sys.exit(-1)
    # Default to running every test file in the directory.
    if not options.test:
        options.test = 'test*'
    try:
        run_tests(options)
    except Fail, obj:
        print "FAILED %s" % obj.getMsg();
        sys.exit(-1)
    sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
georgschoelly/midata-etat | main.py | 1 | 1304 | #!/usr/bin/env python3
import requests
import midata
from midata import authentication as auth
# input data
server = "https://db.scout.ch"
email = "user@pbs.ch"
password = "password"
# log in; abort early if authentication fails, because every later call
# needs a valid auth_info (the original `pass` here let the script crash
# later with a confusing AttributeError on auth_info.user_id).
auth_info = auth.sign_in(server, email, password)
if not auth_info:
    raise SystemExit("Sign-in to %s as %s failed" % (server, email))
user = midata.get_person(auth_info, auth_info.user_id)
# figure out interesting groups
valid_group_roles = {'Abteilungsleiter'}
group_lookup = {group['id']:group for group in user.groups}
roles = (role for role in user.roles
if role.role_type in valid_group_roles)
groups = [group_lookup[role.group] for role in roles]
# SELECT ONE
group_id = '15'
def group_hierarchy(group_id):
    """Recursively fetch the subtree of groups below *group_id*.

    Returns a nested dict mapping child group id -> its own subtree
    (an empty dict for leaf groups).
    NOTE(review): relies on the module-level ``auth_info``; performs one
    API request per group.
    """
    group = midata.get_group(auth_info, group_id)
    children = {child_id: group_hierarchy(child_id)
                for child_id in group.children}
    return children
hierarchy = {group_id: group_hierarchy(group_id)}
def flatten(d):
    """Yield every key of a nested dict-of-dicts, depth first (pre-order)."""
    for key in d:
        yield key
        yield from flatten(d[key])
# grab all people and roles for the selected group and every subgroup
group_members = {group_id: midata.get_members(auth_info, group_id)
                 for group_id in flatten(hierarchy)}
# Print a simple per-group report of its members.
for group_id, members in group_members.items():
    print(group_id)
    print("=======")
    for member in members:
        print(member)
    print()
| gpl-3.0 |
andrius-preimantas/purchase-workflow | purchase_transport_document/model/transport_document.py | 25 | 1094 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2014 Camptocamp SA
# Author: Leonardo Pistone
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class TransportDocument(models.Model):
    """Transport document referenced from purchase documents."""
    _name = "transport.document"

    # Human readable reference of the document; translatable.
    name = fields.Char('Name', translate=True)
| agpl-3.0 |
hassanabidpk/django | tests/template_tests/filter_tests/test_lower.py | 388 | 1155 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import lower
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LowerTests(SimpleTestCase):
    """Template-engine tests for the ``lower`` filter: safe strings must
    not be re-escaped, unsafe strings are escaped when autoescape is on."""

    @setup({'lower01': '{% autoescape off %}{{ a|lower }} {{ b|lower }}{% endautoescape %}'})
    def test_lower01(self):
        # autoescape off: neither value gets escaped.
        output = self.engine.render_to_string('lower01', {"a": "Apple & banana", "b": mark_safe("Apple &amp; banana")})
        self.assertEqual(output, "apple & banana apple &amp; banana")

    @setup({'lower02': '{{ a|lower }} {{ b|lower }}'})
    def test_lower02(self):
        # autoescape on: only the non-safe value is escaped.
        output = self.engine.render_to_string('lower02', {"a": "Apple & banana", "b": mark_safe("Apple &amp; banana")})
        self.assertEqual(output, "apple &amp; banana apple &amp; banana")
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the ``lower`` filter function."""

    def test_lower(self):
        self.assertEqual(lower('TEST'), 'test')

    def test_unicode(self):
        # uppercase E umlaut
        self.assertEqual(lower('\xcb'), '\xeb')

    def test_non_string_input(self):
        # Non-string input is coerced to text before lowercasing.
        self.assertEqual(lower(123), '123')
| bsd-3-clause |
bspink/django | django/conf/locale/ml/formats.py | 1007 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
shanemikel/beets | extra/release.py | 24 | 8554 | #!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
# Repository root and the changelog file that all commands operate on.
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
    """A context manager that temporarily changes the working directory.

    The previous working directory is restored even if the managed block
    raises an exception (the original implementation skipped the restore
    on error, leaving the process in the wrong directory).
    """
    olddir = os.getcwd()
    os.chdir(d)
    try:
        yield
    finally:
        os.chdir(olddir)
# Top-level click command group; subcommands attach via @release.command().
@click.group()
def release():
    pass
# Locations (filenames and patterns) of the version number.
# Each entry is (filename, [(regex, replacement_template), ...]); the
# templates may reference {version}, {major} and {minor}.
VERSION_LOCS = [
    (
        os.path.join(BASE, 'beets', '__init__.py'),
        [
            (
                r'__version__\s*=\s*[\'"]([0-9\.]+)[\'"]',
                "__version__ = '{version}'",
            )
        ]
    ),
    (
        os.path.join(BASE, 'docs', 'conf.py'),
        [
            (
                r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
                "version = '{minor}'",
            ),
            (
                r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
                "release = '{version}'",
            ),
        ]
    ),
    (
        os.path.join(BASE, 'setup.py'),
        [
            (
                r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
                " version='{version}',",
            )
        ]
    ),
]
def bump_version(version):
    """Update the version number in setup.py, docs config, changelog,
    and root module.

    ``version`` must be a three-part dotted string (e.g. ``1.3.9``) and
    must compare strictly greater than the version currently recorded in
    each file listed in ``VERSION_LOCS``.  Also inserts a new
    "(in development)" section at the top of the changelog.
    """
    version_parts = [int(p) for p in version.split('.')]
    assert len(version_parts) == 3, "invalid version number"
    minor = '{}.{}'.format(*version_parts)
    major = '{}'.format(*version_parts)
    # Replace the version each place where it lives.
    for filename, locations in VERSION_LOCS:
        # Read and transform the file.
        out_lines = []
        with open(filename) as f:
            found = False
            for line in f:
                for pattern, template in locations:
                    match = re.match(pattern, line)
                    if match:
                        # Check that this version is actually newer.
                        old_version = match.group(1)
                        old_parts = [int(p) for p in old_version.split('.')]
                        assert version_parts > old_parts, \
                            "version must be newer than {}".format(
                                old_version
                            )
                        # Insert the new version.
                        out_lines.append(template.format(
                            version=version,
                            major=major,
                            minor=minor,
                        ) + '\n')
                        found = True
                        break
                else:
                    # Normal line (for/else: no pattern matched this line).
                    out_lines.append(line)
        if not found:
            print("No pattern found in {}".format(filename))
        # Write the file back.
        with open(filename, 'w') as f:
            f.write(''.join(out_lines))
    # Generate bits to insert into changelog.
    header_line = '{} (in development)'.format(version)
    header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
    header += 'Changelog goes here!\n'
    # Insert into the right place.
    with open(CHANGELOG) as f:
        contents = f.read()
    location = contents.find('\n\n')  # First blank line.
    contents = contents[:location] + header + contents[location:]
    # Write back.
    with open(CHANGELOG, 'w') as f:
        f.write(contents)
@release.command()
@click.argument('version')
def bump(version):
    """Bump the version number.
    """
    # Thin CLI wrapper around bump_version().
    bump_version(version)
def get_latest_changelog():
    """Extract the first section of the changelog.

    Sections are delimited by ReST underline rows made of dashes; the text
    between the first and second underline (minus the second section's
    header line) is returned, stripped of surrounding whitespace.
    """
    started = False
    lines = []
    with open(CHANGELOG) as f:
        for line in f:
            if re.match(r'^--+$', line.strip()):
                # Section boundary. Start or end.
                if started:
                    # Remove last line, which is the header of the next
                    # section.
                    del lines[-1]
                    break
                else:
                    started = True
            elif started:
                lines.append(line)
    return ''.join(lines).strip()
def rst2md(text):
    """Use Pandoc to convert text from ReST to Markdown.

    Requires the ``pandoc`` executable to be on PATH.
    """
    pandoc = subprocess.Popen(
        ['pandoc', '--from=rst', '--to=markdown', '--no-wrap'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, _ = pandoc.communicate(text.encode('utf8'))
    md = stdout.decode('utf8').strip()
    # Fix up odd spacing in lists.
    # NOTE(review): pattern and replacement appear identical here, which
    # makes this substitution a no-op -- the intent was presumably to
    # collapse multi-space bullets down to '- '. Verify against upstream.
    return re.sub(r'^- ', '- ', md, flags=re.M)
def changelog_as_markdown():
    """Get the latest changelog entry as hacked up Markdown.
    """
    rst = get_latest_changelog()
    # Pre-process the ReST with an ordered sequence of regex rewrites.
    rewrites = [
        # Replace plugin links with plugin names.
        (r':doc:`/plugins/(\w+)`', r'``\1``'),
        # References with text.
        (r':ref:`([^<]+)(<[^>]+>)`', r'\1'),
        # Other backslashes with verbatim ranges.
        (r'(\s)`([^`]+)`([^_])', r'\1``\2``\3'),
        # Command links with command names.
        (r':ref:`(\w+)-cmd`', r'``\1``'),
        # Bug numbers.
        (r':bug:`(\d+)`', r'#\1'),
        # Users.
        (r':user:`(\w+)`', r'@\1'),
    ]
    for pattern, replacement in rewrites:
        rst = re.sub(pattern, replacement, rst)
    # Convert with Pandoc, then restore escaped issue numbers.
    md = rst2md(rst)
    return re.sub(r'\\#(\d+)\b', r'#\1', md)
@release.command()
def changelog():
    """Get the most recent version's changelog as Markdown.
    """
    # Printed to stdout so it can be piped into release notes.
    print(changelog_as_markdown())
def get_version(index=0):
    """Read the current version from the changelog.

    ``index`` selects which section header to report: 0 is the newest
    version, 1 the one before it, and so on.
    """
    remaining = index
    with open(CHANGELOG) as f:
        for line in f:
            found = re.search(r'^\d+\.\d+\.\d+', line)
            if not found:
                continue
            if remaining == 0:
                return found.group(0)
            remaining -= 1
@release.command()
def version():
    """Display the current version.
    """
    # Newest changelog section header is the current version.
    print(get_version())
@release.command()
def datestamp():
    """Enter today's date as the release date in the changelog.

    Replaces the "(in development)" marker in the newest section header
    with today's date and rewrites the dashed underline to match the new
    header length.
    """
    dt = datetime.datetime.now()
    stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
    marker = '(in development)'
    lines = []
    underline_length = None
    with open(CHANGELOG) as f:
        for line in f:
            if marker in line:
                # The header line.
                line = line.replace(marker, stamp)
                lines.append(line)
                underline_length = len(line.strip())
            elif underline_length:
                # This is the line after the header. Rewrite the dashes.
                lines.append('-' * underline_length + '\n')
                underline_length = None
            else:
                lines.append(line)
    with open(CHANGELOG, 'w') as f:
        for line in lines:
            f.write(line)
@release.command()
def prep():
    """Run all steps to prepare a release.
    - Tag the commit.
    - Build the sdist package.
    - Generate the Markdown changelog to ``changelog.md``.
    - Bump the version number to the next version.
    """
    cur_version = get_version()
    # Tag.
    subprocess.check_output(['git', 'tag', 'v{}'.format(cur_version)])
    # Build.
    with chdir(BASE):
        subprocess.check_call(['python2', 'setup.py', 'sdist'])
    # Generate Markdown changelog.
    cl = changelog_as_markdown()
    with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
        f.write(cl)
    # Version number bump.
    # FIXME It should be possible to specify this as an argument.
    version_parts = [int(n) for n in cur_version.split('.')]
    version_parts[-1] += 1
    next_version = u'.'.join(map(str, version_parts))
    bump_version(next_version)
@release.command()
def publish():
    """Unleash a release unto the world.
    - Push the tag to GitHub.
    - Upload to PyPI.
    """
    # prep() has already bumped the changelog to the *next* version, so the
    # release being published is the second-newest entry (index 1).
    version = get_version(1)
    # Push to GitHub.
    with chdir(BASE):
        subprocess.check_call(['git', 'push'])
        subprocess.check_call(['git', 'push', '--tags'])
    # Upload to PyPI.
    path = os.path.join(BASE, 'dist', 'beets-{}.tar.gz'.format(version))
    subprocess.check_call(['twine', 'upload', path])
if __name__ == '__main__':
release()
| mit |
kermitfr/kermit-webui | src/webui/scheduler/models.py | 1 | 1725 | '''
Created on Nov 12, 2012
@author: mmornati
'''
from djcelery.models import PeriodicTask, IntervalSchedule
from django.db import models
class TaskScheduler(models.Model):
    """Thin wrapper around djcelery's PeriodicTask that provides interval
    scheduling plus start/stop/terminate helpers."""
    # The underlying djcelery periodic task this scheduler controls.
    periodic_task = models.ForeignKey(PeriodicTask)
    @staticmethod
    def schedule_every(task_name, task, period, every, args=None, kwargs=None):
        """Schedule ``task`` to run every ``every`` ``period`` units.

        ``period`` must be one of 'days', 'hours', 'minutes' or 'seconds'.
        Reuses an existing IntervalSchedule row when one matches; returns
        the newly created TaskScheduler.
        """
        permissible_periods = ['days', 'hours', 'minutes', 'seconds']
        if period not in permissible_periods:
            raise Exception('Invalid period specified')
        # create the periodic task and the interval
        interval_schedules = IntervalSchedule.objects.filter(period=period, every=every)
        if interval_schedules: # just check if interval schedules exist like that already and reuse em
            interval_schedule = interval_schedules[0]
        else: # create a brand new interval schedule
            interval_schedule = IntervalSchedule()
            interval_schedule.every = every # should check to make sure this is a positive int
            interval_schedule.period = period
            interval_schedule.save()
        ptask = PeriodicTask(name=task_name, task=task, interval=interval_schedule)
        if args:
            ptask.args = args
        if kwargs:
            ptask.kwargs = kwargs
        ptask.save()
        return TaskScheduler.objects.create(periodic_task=ptask)
    def stop(self):
        """ pauses the task """
        ptask = self.periodic_task
        ptask.enabled = False
        ptask.save()
    def start(self):
        """Re-enable the underlying periodic task."""
        ptask = self.periodic_task
        ptask.enabled = True
        ptask.save()
    def terminate(self):
        """Disable the task, then delete both this row and the PeriodicTask."""
        self.stop()
        # Grab the reference before deleting self so ptask can still be removed.
        ptask = self.periodic_task
        self.delete()
        ptask.delete()
felipebetancur/numpy | numpy/lib/tests/test_shape_base.py | 44 | 12658 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
vsplit, dstack, kron, tile
)
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
assert_raises, assert_warns
)
class TestApplyAlongAxis(TestCase):
    """Tests for np.apply_along_axis."""
    def test_simple(self):
        a = np.ones((20, 10), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
    def test_simple101(self, level=11):
        # NOTE(review): the unused `level` parameter looks like a leftover
        # from the old nose-style test levels -- confirm before removing.
        a = np.ones((10, 101), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
    def test_3d(self):
        a = np.arange(27).reshape((3, 3, 3))
        assert_array_equal(apply_along_axis(np.sum, 0, a),
                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])
class TestApplyOverAxes(TestCase):
    """Tests for np.apply_over_axes."""
    def test_simple(self):
        # Summing over axes 0 and 2 keeps those axes with length 1.
        a = np.arange(24).reshape(2, 3, 4)
        aoa_a = apply_over_axes(np.sum, a, [0, 2])
        assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
class TestArraySplit(TestCase):
    """Tests for np.array_split (allows unequal splits, unlike np.split)."""
    def test_integer_0_split(self):
        a = np.arange(10)
        assert_raises(ValueError, array_split, a, 0)
    def test_integer_split(self):
        # Splitting 10 elements into k parts for k = 1..11; when k does not
        # divide 10 evenly, the leading chunks are one element longer, and
        # for k > 10 trailing chunks are empty.
        a = np.arange(10)
        res = array_split(a, 1)
        desired = [np.arange(10)]
        compare_results(res, desired)
        res = array_split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)
        res = array_split(a, 3)
        desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
        compare_results(res, desired)
        res = array_split(a, 4)
        desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
                   np.arange(8, 10)]
        compare_results(res, desired)
        res = array_split(a, 5)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 10)]
        compare_results(res, desired)
        res = array_split(a, 6)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 7)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 8)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
                   np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 9)
        desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
                   np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 11)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)
    def test_integer_split_2D_rows(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = assert_warns(FutureWarning, array_split, a, 3, axis=0)
        # After removing the FutureWarning, the last should be zeros((0, 10))
        desired = [np.array([np.arange(10)]), np.array([np.arange(10)]),
                   np.array([])]
        compare_results(res, desired)
        assert_(a.dtype.type is res[-1].dtype.type)
    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)
    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis
        """
        a = np.array([np.arange(10), np.arange(10)])
        res = assert_warns(FutureWarning, array_split, a, 3)
        # After removing the FutureWarning, the last should be zeros((0, 10))
        desired = [np.array([np.arange(10)]), np.array([np.arange(10)]),
                   np.array([])]
        compare_results(res, desired)
        assert_(a.dtype.type is res[-1].dtype.type)
    # perhaps should check higher dimensions
    def test_index_split_simple(self):
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)
    def test_index_split_low_bound(self):
        # A leading 0 index produces an empty leading chunk.
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)
    def test_index_split_high_bound(self):
        # Indices past the end produce empty trailing chunks.
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)
class TestSplit(TestCase):
    # The split function is essentially the same as array_split,
    # except that it tests whether splitting will result in an
    # equal split.  Only test for this case.
    def test_equal_split(self):
        a = np.arange(10)
        res = split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)
    def test_unequal_split(self):
        # Unlike array_split, split must reject a non-even division.
        a = np.arange(10)
        assert_raises(ValueError, split, a, 3)
class TestDstack(TestCase):
    """Tests for np.dstack (stacking along the third axis)."""
    def test_0D_array(self):
        a = np.array(1)
        b = np.array(2)
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)
    def test_1D_array(self):
        a = np.array([1])
        b = np.array([2])
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)
    def test_2D_array(self):
        a = np.array([[1], [2]])
        b = np.array([[1], [2]])
        res = dstack([a, b])
        desired = np.array([[[1, 1]], [[2, 2, ]]])
        assert_array_equal(res, desired)
    def test_2D_array2(self):
        a = np.array([1, 2])
        b = np.array([1, 2])
        res = dstack([a, b])
        desired = np.array([[[1, 1], [2, 2]]])
        assert_array_equal(res, desired)
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
class TestHsplit(TestCase):
    """Only testing for integer splits.
    """
    def test_0D_array(self):
        # hsplit is undefined for 0-d input and must raise ValueError.
        # Use assert_raises (as TestArraySplit already does) instead of the
        # try/except + assert_(0) pattern, which reports an opaque failure.
        a = np.array(1)
        assert_raises(ValueError, hsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        res = hsplit(a, 2)
        desired = [np.array([1, 2]), np.array([3, 4])]
        compare_results(res, desired)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = hsplit(a, 2)
        desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
        compare_results(res, desired)
class TestVsplit(TestCase):
    """Only testing for integer splits.
    """
    def test_1D_array(self):
        # vsplit requires at least 2 dimensions and must raise ValueError
        # on 1-d input; assert_raises replaces the opaque try/except +
        # assert_(0) pattern and matches TestArraySplit's style.
        a = np.array([1, 2, 3, 4])
        assert_raises(ValueError, vsplit, a, 2)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = vsplit(a, 2)
        desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
        compare_results(res, desired)
class TestDsplit(TestCase):
    # Only testing for integer splits.
    def test_2D_array(self):
        # dsplit requires at least 3 dimensions and must raise ValueError
        # on 2-d input; assert_raises replaces the opaque try/except +
        # assert_(0) pattern and matches TestArraySplit's style.
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        assert_raises(ValueError, dsplit, a, 2)

    def test_3D_array(self):
        a = np.array([[[1, 2, 3, 4],
                       [1, 2, 3, 4]],
                      [[1, 2, 3, 4],
                       [1, 2, 3, 4]]])
        res = dsplit(a, 2)
        desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
                   np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
        compare_results(res, desired)
class TestSqueeze(TestCase):
    """Tests for np.squeeze (removal of length-1 axes)."""
    def test_basic(self):
        from numpy.random import rand
        a = rand(20, 10, 10, 1, 1)
        b = rand(20, 1, 10, 1, 20)
        c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
        assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
        assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))
        # Squeezing to 0-dim should still give an ndarray
        a = [[[1.5]]]
        res = np.squeeze(a)
        assert_equal(res, 1.5)
        assert_equal(res.ndim, 0)
        assert_equal(type(res), np.ndarray)
class TestKron(TestCase):
    def test_return_type(self):
        # kron must honour __array_priority__ when choosing the result type.
        # NOTE(review): np.asmatrix/np.matrix are deprecated in modern numpy;
        # this test predates that.
        a = np.ones([2, 2])
        m = np.asmatrix(a)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(m, m)), np.matrix)
        assert_equal(type(kron(a, m)), np.matrix)
        assert_equal(type(kron(m, a)), np.matrix)
        class myarray(np.ndarray):
            # Lower priority than ndarray, so plain ndarray wins on mixing.
            __array_priority__ = 0.0
        ma = myarray(a.shape, a.dtype, a.data)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(ma, ma)), myarray)
        assert_equal(type(kron(a, ma)), np.ndarray)
        assert_equal(type(kron(ma, a)), myarray)
class TestTile(TestCase):
    """Tests for np.tile."""
    def test_basic(self):
        a = np.array([0, 1, 2])
        b = [[1, 2], [3, 4]]
        assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
        assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
        assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
        assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
        assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
                                       [1, 2, 1, 2], [3, 4, 3, 4]])
    def test_tile_one_repetition_on_array_gh4679(self):
        # Regression test: tile(a, 1) must return a copy, not a view.
        a = np.arange(5)
        b = tile(a, 1)
        b += 2
        assert_equal(a, np.arange(5))
    def test_empty(self):
        a = np.array([[[]]])
        b = np.array([[], []])
        c = tile(b, 2).shape
        d = tile(a, (3, 2, 5)).shape
        assert_equal(c, (2, 0))
        assert_equal(d, (3, 2, 0))
    def test_kroncompare(self):
        # tile(b, r) must equal kron(ones(r), b) for random inputs.
        from numpy.random import randint
        reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
        shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
        for s in shape:
            b = randint(0, 10, size=s)
            for r in reps:
                a = np.ones(r, b.dtype)
                large = tile(b, r)
                klarge = kron(a, b)
                assert_equal(large, klarge)
class TestMayShareMemory(TestCase):
    """Tests for np.may_share_memory on views vs. unrelated arrays."""
    def test_basic(self):
        d = np.ones((50, 60))
        d2 = np.ones((30, 60, 6))
        self.assertTrue(np.may_share_memory(d, d))
        self.assertTrue(np.may_share_memory(d, d[::-1]))
        self.assertTrue(np.may_share_memory(d, d[::2]))
        self.assertTrue(np.may_share_memory(d, d[1:, ::-1]))
        self.assertFalse(np.may_share_memory(d[::-1], d2))
        self.assertFalse(np.may_share_memory(d[::2], d2))
        self.assertFalse(np.may_share_memory(d[1:, ::-1], d2))
        self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
def compare_results(res, desired):
    """Assert that two sequences of arrays are pairwise equal.

    Also checks that the sequences have the same length; the original
    ``range(len(desired))`` loop silently ignored extra entries in ``res``.
    """
    assert_equal(len(res), len(desired))
    for actual, expected in zip(res, desired):
        assert_array_equal(actual, expected)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
indextbag/arsenalsuite | python/apps/farm_stats/farmreport.py | 11 | 5364 |
from blur.Stone import *
from blur.Stonegui import *
from PyQt4.QtCore import *
from blur.defaultdict import *
class FarmReport:
    """Base class for farm statistics reports.

    Subclasses fill ``self.records`` in ``generate`` and describe the
    result columns via ``self.columns``."""
    def __init__(self,name):
        self.name = name
        self.records = RecordList()
        self.columns = []
        self.headerLabels = []
        self.model = None
    def generate(self,startDate,endDate):
        # Subclasses override this to populate self.records for the range.
        self.startDate = startDate
        self.endDate = endDate
        pass
    def createModel( self, parent = None ):
        # Build a Qt item model populated with this report's records.
        ret = RecordListModel(parent)
        self.setupModel(ret)
        return ret
    def setupModel( self, model ):
        # Push the collected records and column list into the model.
        model.setRootList( self.records )
        model.setColumns( self.columns )
# Lazily created Stone table handles, populated by loadTables().
# NOTE(review): user_job_count_table has no module-level initializer; it only
# exists once loadTables() has run -- confirm all callers invoke it first.
user_slave_summary_table = None
user_render_summary_table = None
def loadTables():
    """Merge the stats schema and look up the summary tables (idempotent)."""
    global user_slave_summary_table
    global user_render_summary_table
    global user_job_count_table
    # Create the table for this function
    if not user_slave_summary_table:
        Database.current().schema().mergeXmlSchema( 'stats_schema.xml' )
        user_slave_summary_table = Database.current().tableByName( 'user_slave_summary' )
        user_render_summary_table = Database.current().tableByName( 'user_render_summary' )
        user_job_count_table = Database.current().tableByName( 'user_job_counts' )
class UserHostSlaveReportModel(RecordListModel):
    """Record model that sorts the 'Hours' column numerically."""
    def __init__(self,parent = None):
        RecordListModel.__init__(self,parent)
    def compare( self, r1, c1, r2, c2 ):
        if c1.toLower() == 'hours':
            # QVariant.toDouble() returns (value, ok); take the value.
            d1 = r1.getValue(c1).toDouble()[0]
            d2 = r2.getValue(c2).toDouble()[0]
            if d1 > d2: return 1
            if d2 > d1: return -1
            return 0
        # All other columns fall back to the default (string) comparison.
        return RecordListModel.compare(self,r1,c1,r2,c2)
class UserHostSlaveReport(FarmReport):
    """Hours each user's session occupied each host, from the server-side
    hosthistory_user_slave_summary() function."""
    def __init__(self):
        FarmReport.__init__(self,'User Host Slave Summary')
        self.columns = ['User', 'Host', 'Hours']
    def generate(self,startDate,endDate):
        FarmReport.generate(self,startDate,endDate)
        loadTables()
        stats = Database.current().exec_( "SELECT * FROM hosthistory_user_slave_summary( ?, ? );", [QVariant(startDate),QVariant(endDate)] );
        while stats.next():
            r = Record( RecordImp( user_slave_summary_table, stats ), False )
            self.records.append( r )
            # Debug trace of each row as it is loaded.
            print r.getValue( 'user' ).toString(), r.getValue( 'host' ).toString(), r.getValue( 'hours' ).toString()
    def createModel( self, parent = None ):
        # Numeric sorting on 'Hours' requires the specialized model.
        ret = UserHostSlaveReportModel(parent)
        self.setupModel(ret)
        return ret
class UserRenderReportModel(RecordListModel):
    """Record model that sorts and displays interval-typed columns."""
    def __init__(self,parent = None):
        RecordListModel.__init__(self,parent)
        # Columns whose values are PostgreSQL interval strings.
        self.IntervalCols = ['totalrendertime','totalerrortime']
    def compare( self, r1, c1, r2, c2 ):
        if c1.toLower() in self.IntervalCols:
            # Interval.fromString returns (interval, ok); take the interval.
            i1 = Interval.fromString(r1.getValue(c1).toString())[0]
            i2 = Interval.fromString(r2.getValue(c2).toString())[0]
            return Interval.compare( i1, i2 )
        return RecordListModel.compare(self,r1,c1,r2,c2)
    def recordData( self, record, role, col ):
        if role == Qt.DisplayRole and col.toLower() in self.IntervalCols:
            # Render intervals as hours for display.
            i = Interval.fromString(record.getValue(col).toString())[0]
            return QVariant(i.toString(Interval.Hours,Interval.Hours))
        return RecordListModel.recordData(self,record,role,col)
class UserRenderReport(FarmReport):
    """Aggregate render vs. error time per user.

    NOTE(review): the SQL hard-codes a trailing 7-day window and ignores
    the startDate/endDate arguments -- confirm whether that is intended.
    """
    def __init__(self):
        FarmReport.__init__(self,'User Render Summary')
        self.columns = ['User', 'TotalRenderTime', 'TotalErrorTime', 'ErrorTimePercent']
    def generate(self,startDate,endDate):
        FarmReport.generate(self,startDate,endDate)
        loadTables()
        stats = Database.current().exec_( "select usr.name, sum(coalesce(totaltasktime,'0'::interval)) as totalrendertime, sum(coalesce(totalerrortime,'0'::interval)) as totalerrortime, sum(coalesce(totalerrortime,'0'::interval))/sum(coalesce(totaltasktime,'0'::interval)+coalesce(totalerrortime,'0'::interval)) as errortimeperc from jobstat, usr where started > 'today'::timestamp - '7 days'::interval and ended is not null and fkeyusr=keyelement group by usr.name order by errortimeperc desc;" )
        while stats.next():
            r = Record( RecordImp( user_render_summary_table, stats ), False )
            self.records.append( r )
            # Debug trace of each row as it is loaded.
            print r.getValue( 'user' ).toString(), r.getValue( 'totalRenderTime' ).toString(), r.getValue( 'errortimepercent' ).toString()
    def createModel( self, parent = None ):
        # Interval-aware sorting/formatting requires the specialized model.
        ret = UserRenderReportModel(parent)
        self.setupModel(ret)
        return ret
class UserJobCountReport(FarmReport):
    """Pivot of job counts per user, one column per job status."""
    def __init__(self):
        FarmReport.__init__(self,'User Job Counts')
        self.columns = ['User','Ready','Started','Done','Suspended','Holding']
    def generate(self,startDate,endDate):
        FarmReport.generate(self,startDate,endDate)
        loadTables()
        # Map user name -> accumulating Record; one DB row per (user, status).
        per_user = {}
        stats = Database.current().exec_("select name, job.status, count(*) from job, usr where fkeyusr=keyelement group by keyelement, job.status, name order by job.status, count desc;")
        while stats.next():
            user = stats.value(0).toString()
            if not user in per_user:
                per_user[user] = Record( RecordImp( user_job_count_table ), False )
                per_user[user].setValue( "user", QVariant(user) )
            # Status name (e.g. 'ready') becomes the column being set.
            per_user[user].setValue( stats.value(1).toString(), QVariant(stats.value(2).toInt()[0]) )
        for rec in per_user.values():
            self.records.append( rec )
class FarmReportType:
    """Pairs a human-readable report name with the report class that
    implements it."""
    def __init__(self, reportName, reportClass):
        # Display name shown to the user.
        self.Name = reportName
        # FarmReport subclass used to build the report.
        self.Class = reportClass
# Registry of the available report types; order controls presentation order.
Types = [FarmReportType("User's Host Slave Report",UserHostSlaveReport), FarmReportType("User Render Report",UserRenderReport), FarmReportType("User Job Count Report", UserJobCountReport)]
| gpl-2.0 |
nitzmahone/ansible | test/units/modules/storage/netapp/test_netapp_e_asup.py | 56 | 7858 | # (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
from ansible.modules.storage.netapp.netapp_e_asup import Asup
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AsupTest(ModuleTestCase):
    """Unit tests for the netapp_e_asup module's Asup class.

    All REST traffic is mocked via REQ_FUNC; no live array is contacted.
    NOTE(review): assertEquals/assertRaisesRegexp are deprecated unittest
    aliases (removed in Python 3.12); presumably kept for the file's
    Python 2 compatibility (`__metaclass__ = type`) -- confirm before
    modernizing.
    """
    # Connection arguments required by every module invocation.
    REQUIRED_PARAMS = {
        'api_username': 'rw',
        'api_password': 'password',
        'api_url': 'http://localhost',
        'ssid': '1',
    }
    # Dotted path of the request helper that every test patches.
    REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_asup.request'
    def _set_args(self, args=None):
        # Merge per-test arguments over the required connection params.
        module_args = self.REQUIRED_PARAMS.copy()
        if args is not None:
            module_args.update(args)
        set_module_args(module_args)
    def test_get_config_asup_capable_false(self):
        """Ensure we fail correctly if ASUP is not available on this platform"""
        self._set_args()
        expected = dict(asupCapable=False, onDemandCapable=True)
        asup = Asup()
        # Expecting an update
        with self.assertRaisesRegexp(AnsibleFailJson, r"not supported"):
            with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
                asup.get_configuration()
    def test_get_config_on_demand_capable_false(self):
        """Ensure we fail correctly if ASUP is not available on this platform"""
        self._set_args()
        expected = dict(asupCapable=True, onDemandCapable=False)
        asup = Asup()
        # Expecting an update
        with self.assertRaisesRegexp(AnsibleFailJson, r"not supported"):
            with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
                asup.get_configuration()
    def test_get_config(self):
        """Validate retrieving the ASUP configuration"""
        self._set_args()
        expected = dict(asupCapable=True, onDemandCapable=True)
        asup = Asup()
        with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
            config = asup.get_configuration()
            self.assertEquals(config, expected)
    def test_update_configuration(self):
        """Validate retrieving the ASUP configuration"""
        self._set_args(dict(asup='enabled'))
        expected = dict()
        initial = dict(asupCapable=True,
                       asupEnabled=True,
                       onDemandEnabled=False,
                       remoteDiagsEnabled=False,
                       schedule=dict(daysOfWeek=[], dailyMinTime=0, weeklyMinTime=0, dailyMaxTime=24, weeklyMaxTime=24))
        asup = Asup()
        with mock.patch(self.REQ_FUNC, return_value=(200, expected)) as req:
            with mock.patch.object(asup, 'get_configuration', return_value=initial):
                updated = asup.update_configuration()
                self.assertTrue(req.called)
                self.assertTrue(updated)
    def test_update_configuration_asup_disable(self):
        """Validate retrieving the ASUP configuration"""
        self._set_args(dict(asup='disabled'))
        expected = dict()
        initial = dict(asupCapable=True,
                       asupEnabled=True,
                       onDemandEnabled=False,
                       remoteDiagsEnabled=False,
                       schedule=dict(daysOfWeek=[], dailyMinTime=0, weeklyMinTime=0, dailyMaxTime=24, weeklyMaxTime=24))
        asup = Asup()
        with mock.patch(self.REQ_FUNC, return_value=(200, expected)) as req:
            with mock.patch.object(asup, 'get_configuration', return_value=initial):
                updated = asup.update_configuration()
                self.assertTrue(updated)
                self.assertTrue(req.called)
                # Ensure it was called with the right arguments
                called_with = req.call_args
                body = json.loads(called_with[1]['data'])
                self.assertFalse(body['asupEnabled'])
    def test_update_configuration_enable(self):
        """Validate retrieving the ASUP configuration"""
        self._set_args(dict(asup='enabled'))
        expected = dict()
        initial = dict(asupCapable=False,
                       asupEnabled=False,
                       onDemandEnabled=False,
                       remoteDiagsEnabled=False,
                       schedule=dict(daysOfWeek=[], dailyMinTime=0, weeklyMinTime=0, dailyMaxTime=24, weeklyMaxTime=24))
        asup = Asup()
        with mock.patch(self.REQ_FUNC, return_value=(200, expected)) as req:
            with mock.patch.object(asup, 'get_configuration', return_value=initial):
                updated = asup.update_configuration()
                self.assertTrue(updated)
                self.assertTrue(req.called)
                # Ensure it was called with the right arguments
                called_with = req.call_args
                body = json.loads(called_with[1]['data'])
                self.assertTrue(body['asupEnabled'])
                # Enabling ASUP also turns on the on-demand/remote features.
                self.assertTrue(body['onDemandEnabled'])
                self.assertTrue(body['remoteDiagsEnabled'])
    def test_update_configuration_request_exception(self):
        """Validate exception handling when request throws an exception."""
        config_response = dict(asupEnabled=True,
                               onDemandEnabled=True,
                               remoteDiagsEnabled=True,
                               schedule=dict(daysOfWeek=[],
                                             dailyMinTime=0,
                                             weeklyMinTime=0,
                                             dailyMaxTime=24,
                                             weeklyMaxTime=24))
        self._set_args(dict(state="enabled"))
        asup = Asup()
        with self.assertRaises(Exception):
            with mock.patch.object(asup, 'get_configuration', return_value=config_response):
                with mock.patch(self.REQ_FUNC, side_effect=Exception):
                    asup.update_configuration()
    def test_init_schedule(self):
        """Validate schedule correct schedule initialization"""
        self._set_args(dict(state="enabled", active=True, days=["sunday", "monday", "tuesday"], start=20, end=24))
        asup = Asup()
        self.assertTrue(asup.asup)
        self.assertEquals(asup.days, ["sunday", "monday", "tuesday"]),
        # Hours are converted to minutes; end=24 is clamped to 1439.
        self.assertEquals(asup.start, 1200)
        self.assertEquals(asup.end, 1439)
    def test_init_schedule_invalid(self):
        """Validate updating ASUP with invalid schedule fails test."""
        self._set_args(dict(state="enabled", active=True, start=22, end=20))
        with self.assertRaisesRegexp(AnsibleFailJson, r"start time is invalid"):
            Asup()
    def test_init_schedule_days_invalid(self):
        """Validate updating ASUP with invalid schedule fails test."""
        self._set_args(dict(state="enabled", active=True, days=["someday", "thataday", "nonday"]))
        with self.assertRaises(AnsibleFailJson):
            Asup()
    def test_update(self):
        """Validate updating ASUP with valid schedule passes"""
        initial = dict(asupCapable=True,
                       onDemandCapable=True,
                       asupEnabled=True,
                       onDemandEnabled=False,
                       remoteDiagsEnabled=False,
                       schedule=dict(daysOfWeek=[], dailyMinTime=0, weeklyMinTime=0, dailyMaxTime=24, weeklyMaxTime=24))
        self._set_args(dict(state="enabled", active=True, days=["sunday", "monday", "tuesday"], start=10, end=20))
        asup = Asup()
        with self.assertRaisesRegexp(AnsibleExitJson, r"ASUP settings have been updated"):
            with mock.patch(self.REQ_FUNC, return_value=(200, dict(asupCapable=True))):
                with mock.patch.object(asup, "get_configuration", return_value=initial):
                    asup.update()
| gpl-3.0 |
masia02/chainer | tests/chainer_tests/optimizers_tests/test_optimizers_by_linear_model.py | 13 | 4116 | import unittest
import numpy as np
import six
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class LinearModel(object):
    """Tiny 2-class linear classification problem used to check that an
    optimizer can fit randomly generated, linearly separable data."""
    UNIT_NUM = 10
    BATCH_SIZE = 32
    EPOCH = 100
    def __init__(self, optimizer):
        self.model = chainer.FunctionSet(
            l=F.Linear(self.UNIT_NUM, 2)
        )
        self.optimizer = optimizer
        # true parameters
        self.w = np.random.uniform(-1, 1,
                                   (self.UNIT_NUM, 1)).astype(np.float32)
        self.b = np.random.uniform(-1, 1, (1, )).astype(np.float32)
    def _train_linear_classifier(self, model, optimizer, gpu):
        def _make_label(x):
            # Label is decided by which side of the true hyperplane x lies on.
            a = (np.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
            t = np.empty_like(a).astype(np.int32)
            t[a >= 0] = 0
            t[a < 0] = 1
            return t
        def _make_dataset(batch_size, unit_num, gpu):
            x_data = np.random.uniform(
                -1, 1, (batch_size, unit_num)).astype(np.float32)
            t_data = _make_label(x_data)
            if gpu:
                x_data = cuda.to_gpu(x_data)
                t_data = cuda.to_gpu(t_data)
            x = chainer.Variable(x_data)
            t = chainer.Variable(t_data)
            return x, t
        for epoch in six.moves.range(self.EPOCH):
            x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
            optimizer.zero_grads()
            y = model.l(x)
            loss = F.softmax_cross_entropy(y, t)
            loss.backward()
            optimizer.update()
        # Evaluate on a freshly drawn batch.
        x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
        y_test = model.l(x_test)
        return F.accuracy(y_test, t_test)
    def _accuracy_cpu(self):
        self.optimizer.setup(self.model)
        return self._train_linear_classifier(self.model, self.optimizer, False)
    def _accuracy_gpu(self):
        model = self.model
        optimizer = self.optimizer
        model.to_gpu()
        optimizer.setup(model)
        return self._train_linear_classifier(model, optimizer, True)
    def accuracy(self, gpu):
        # Returns a host-side accuracy value for both CPU and GPU paths.
        if gpu:
            return cuda.to_cpu(self._accuracy_gpu().data)
        else:
            return self._accuracy_cpu().data
class OptimizerTestBase(object):
    # Mixin providing shared convergence tests; concrete subclasses implement
    # create() to return the optimizer under test.
    def create(self):
        raise NotImplementedError()
    def setUp(self):
        self.model = LinearModel(self.create())
    @condition.retry(10)
    def test_linear_model_cpu(self):
        # Training is stochastic, hence the retry decorator.
        self.assertGreater(self.model.accuracy(False), 0.9)
    @attr.gpu
    @condition.retry(10)
    def test_linear_model_gpu(self):
        self.assertGreater(self.model.accuracy(True), 0.9)
    def test_initialize(self):
        model = self.model.model
        assert isinstance(model, chainer.FunctionSet)
        optimizer = self.create()
        optimizer.setup(model)
        self.assertEqual(len(optimizer.tuples), len(model.parameters))
        msg = "'params_grads' must have 'parameters' and 'gradients'"
        # NOTE(review): assertRaisesRegexp is a deprecated alias (removed in
        # Python 3.12); presumably kept here for Python 2 support.
        with self.assertRaisesRegexp(ValueError, msg):
            optimizer.setup('xxx')
class TestAdaDelta(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against AdaDelta."""
    def create(self):
        return optimizers.AdaDelta(eps=1e-5)
class TestAdaGrad(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against AdaGrad."""
    def create(self):
        return optimizers.AdaGrad(0.1)
class TestAdam(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against Adam."""
    def create(self):
        return optimizers.Adam(0.1)
class TestMomentumSGD(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against MomentumSGD."""
    def create(self):
        return optimizers.MomentumSGD(0.1)
class TestRMSprop(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against RMSprop."""
    def create(self):
        return optimizers.RMSprop(0.1)
class TestRMSpropGraves(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against RMSpropGraves."""
    def create(self):
        return optimizers.RMSpropGraves(0.1)
class TestSGD(OptimizerTestBase, unittest.TestCase):
    """Run the shared optimizer tests against plain SGD."""
    def create(self):
        return optimizers.SGD(0.1)
# Hand control to chainer's test runner when this module is executed.
testing.run_module(__name__, __file__)
| mit |
prekolna/AlgorithmsGreatestHits | search_algorithms/rselect.py | 2 | 1689 | import math
import random
def rselect(array_to_search, n):
    """Return the nth (0-based) order statistic of the given array.

    Thin wrapper that runs randomized select over the whole array.
    Note: the array is partially reordered in place.
    """
    size = len(array_to_search)
    return rselect_internal(array_to_search, n, 0, size)
def rselect_internal(search_array, n, start_index, end_index):
    """
    Recursively computes the nth order statistic within the half-open
    range [start_index, end_index) of search_array.

    `n` is the 0-based rank RELATIVE to start_index (so n == 0 means the
    smallest element of the sub-range).  The sub-range is partially
    reordered in place, quickselect-style.

    Bug fix: the original mixed absolute array indices with relative
    ranks, so any call path with two right-recursions (start_index > 0)
    returned a wrong element.  All rank arithmetic below is consistently
    relative to start_index.
    """
    assert start_index >= 0 and end_index <= len(search_array)
    # Trivial case: one (or zero) elements left.
    if end_index - start_index <= 1:
        return search_array[start_index]
    s = start_index
    e = end_index
    # Choose a pivot uniformly at random in [s, e) and move it to the front
    # to simplify the partition loop.
    pp = random.randrange(s, e)
    if pp != s:
        search_array[s], search_array[pp] = search_array[pp], search_array[s]
    c = search_array[s]
    # Lomuto partition: after the loop, elements < c occupy [s+1, i).
    i = s + 1
    for j in range(s + 1, e):
        if search_array[j] < c:
            search_array[j], search_array[i] = search_array[i], search_array[j]
            i += 1
    # Put the pivot back into its final sorted slot (index i - 1).
    search_array[s], search_array[i - 1] = search_array[i - 1], search_array[s]
    rank = (i - 1) - s  # pivot's 0-based rank within [s, e)
    if rank == n:
        # The pivot is exactly the element we are looking for.
        return search_array[i - 1]
    if rank > n:
        # Target lies among the elements strictly less than the pivot.
        return rselect_internal(search_array, n, s, i - 1)
    # Target lies to the right; skip the pivot and everything before it.
    return rselect_internal(search_array, n - rank - 1, i, e)
def rselect_from_file(file_path, n):
    """
    Performs Randomized select on a text file that contains an array with
    a single integer element per line.

    Bug fix: the original opened the file inside the list comprehension
    and never closed it; the `with` block guarantees the handle is
    released even if parsing raises.
    """
    with open(file_path) as data_file:
        return rselect([int(line) for line in data_file], n)
| mit |
betrisey/home-assistant | homeassistant/components/media_player/snapcast.py | 6 | 3231 | """
Support for interacting with Snapcast clients.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.snapcast/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_SELECT_SOURCE,
PLATFORM_SCHEMA, MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_IDLE, STATE_PLAYING, STATE_UNKNOWN, CONF_HOST, CONF_PORT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['snapcast==1.2.2']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'snapcast'
# Feature bitmask advertised to Home Assistant: volume set, mute,
# and source selection.
SUPPORT_SNAPCAST = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_SELECT_SOURCE
# Host is mandatory; port defaults to snapcast's control port (resolved
# lazily in setup_platform, once the snapcast package is imported).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT): cv.port,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Snapcast platform.

    Connects to the configured Snapserver and registers one media player
    entity per connected Snapcast client.  Returns False on connection
    failure so Home Assistant marks the platform as not set up.
    """
    # Imported here (Home Assistant convention) so the optional
    # dependency is only loaded when the platform is actually used.
    import snapcast.control
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT, snapcast.control.CONTROL_PORT)
    try:
        server = snapcast.control.Snapserver(host, port)
    except socket.gaierror:
        # gaierror: the host name could not be resolved.
        _LOGGER.error('Could not connect to Snapcast server at %s:%d',
                      host, port)
        return False
    add_devices([SnapcastDevice(client) for client in server.clients])
class SnapcastDevice(MediaPlayerDevice):
    """Representation of a Snapcast client device."""
    # pylint: disable=abstract-method
    def __init__(self, client):
        """Initialize the Snapcast device."""
        # Wrapped snapcast client; all state is proxied through it.
        self._client = client
    @property
    def name(self):
        """Return the name of the device."""
        return self._client.identifier
    @property
    def volume_level(self):
        """Return the volume level."""
        # Snapcast uses 0..100; Home Assistant expects 0.0..1.0.
        return self._client.volume / 100
    @property
    def is_volume_muted(self):
        """Volume muted."""
        return self._client.muted
    @property
    def supported_media_commands(self):
        """Flag of media commands that are supported."""
        return SUPPORT_SNAPCAST
    @property
    def state(self):
        """Return the state of the player."""
        if not self._client.connected:
            return STATE_OFF
        # Map the stream's textual status onto HA state constants;
        # anything unrecognized falls back to STATE_UNKNOWN.
        return {
            'idle': STATE_IDLE,
            'playing': STATE_PLAYING,
            'unknown': STATE_UNKNOWN,
        }.get(self._client.stream.status, STATE_UNKNOWN)
    @property
    def source(self):
        """Return the current input source."""
        return self._client.stream.name
    @property
    def source_list(self):
        """List of available input sources."""
        return list(self._client.streams_by_name().keys())
    def mute_volume(self, mute):
        """Send the mute command."""
        self._client.muted = mute
    def set_volume_level(self, volume):
        """Set the volume level."""
        # Convert HA's 0.0..1.0 scale back to snapcast's 0..100.
        self._client.volume = round(volume * 100)
    def select_source(self, source):
        """Set input source."""
        # Silently ignore unknown source names.
        streams = self._client.streams_by_name()
        if source in streams:
            self._client.stream = streams[source].identifier
| mit |
HM2MC/Webfront | reportlab-2.5/tests/test_platypus_breaking.py | 5 | 10374 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
"""Tests pageBreakBefore, frameBreakBefore, keepWithNext...
"""
__version__='''$Id: test_platypus_breaking.py 3703 2010-04-14 17:13:51Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys, os, time
from string import split, strip, join, whitespace
from operator import truth
from types import StringType, ListType
import unittest
from reportlab.platypus.flowables import Flowable
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.frames import Frame
from reportlab.lib.randomtext import randomText, PYTHON
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, Indenter, SimpleDocTemplate
from reportlab.platypus.paragraph import *
def myMainPageFrame(canvas, doc):
    "The page frame used for all PDF documents."
    # Save/restore so the font change does not leak into document content.
    canvas.saveState()
    canvas.setFont('Times-Roman', 12)
    pageNumber = canvas.getPageNumber()
    # Draw the page number near the bottom of the page.
    canvas.drawString(10*cm, cm, str(pageNumber))
    canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
    """Four-frame (2x2) page template used by the breaking tests.

    Splitting is disabled and frame boundaries are drawn so page/frame
    break behaviour is visible in the output PDF.
    """
    _invalidInitArgs = ('pageTemplates',)
    def __init__(self, filename, **kw):
        # Two frames on the top half of the page, two on the bottom.
        frame1 = Frame(2.5*cm, 15.5*cm, 6*cm, 10*cm, id='F1')
        frame2 = Frame(11.5*cm, 15.5*cm, 6*cm, 10*cm, id='F2')
        frame3 = Frame(2.5*cm, 2.5*cm, 6*cm, 10*cm, id='F3')
        frame4 = Frame(11.5*cm, 2.5*cm, 6*cm, 10*cm, id='F4')
        self.allowSplitting = 0
        self.showBoundary = 1
        BaseDocTemplate.__init__(self, filename, **kw)
        template = PageTemplate('normal', [frame1, frame2, frame3, frame4], myMainPageFrame)
        self.addPageTemplates(template)
# Chomsky-style filler paragraph used as the base text for the <br/> tests.
_text1='''Furthermore, the fundamental error of regarding functional notions as
categorial delimits a general convention regarding the forms of the
grammar. I suggested that these results would follow from the
assumption that the descriptive power of the base component may remedy
and, at the same time, eliminate a descriptive fact. Thus a subset of
English sentences interesting on quite independent grounds raises
serious doubts about the ultimate standard that determines the accuracy
of any proposed grammar. Of course, the natural general principle that
will subsume this case can be defined in such a way as to impose the
strong generative capacity of the theory. By combining adjunctions and
certain deformations, the descriptive power of the base component is not
subject to the levels of acceptability from fairly high (e.g. (99a)) to
virtual gibberish (e.g. (98d)).
'''
def _test0(self):
    "This makes one long multi-page paragraph."
    # Build story.
    story = []
    a = story.append
    styleSheet = getSampleStyleSheet()
    # h1 forces a page break, h2 a frame break; h3 must stick to what follows.
    h1 = styleSheet['Heading1']
    h1.pageBreakBefore = 1
    h1.keepWithNext = 1
    h2 = styleSheet['Heading2']
    h2.frameBreakBefore = 1
    h2.keepWithNext = 1
    h3 = styleSheet['Heading3']
    h3.backColor = colors.cyan
    h3.keepWithNext = 1
    # Body text in the four supported alignments.
    bt = styleSheet['BodyText']
    btj = ParagraphStyle('bodyText1j',parent=bt,alignment=TA_JUSTIFY)
    btr = ParagraphStyle('bodyText1r',parent=bt,alignment=TA_RIGHT)
    btc = ParagraphStyle('bodyText1c',parent=bt,alignment=TA_CENTER)
    a(Paragraph("""
    <a name='top'/>Subsequent pages test pageBreakBefore, frameBreakBefore and
    keepTogether attributes. Generated at %s. The number in brackets
    at the end of each paragraph is its position in the story. (%d)""" % (
    time.ctime(time.time()), len(story)), bt))
    # Exercise the break attributes across many pages/frames.
    for i in xrange(10):
        a(Paragraph('Heading 1 always starts a new page (%d)' % len(story), h1))
        for j in xrange(3):
            a(Paragraph('Heading1 paragraphs should always'
            'have a page break before. Heading 2 on the other hand'
            'should always have a FRAME break before (%d)' % len(story), bt))
            a(Paragraph('Heading 2 always starts a new frame (%d)' % len(story), h2))
            a(Paragraph('Heading1 paragraphs should always'
            'have a page break before. Heading 2 on the other hand'
            'should always have a FRAME break before (%d)' % len(story), bt))
            for j in xrange(3):
                a(Paragraph(randomText(theme=PYTHON, sentences=2)+' (%d)' % len(story), bt))
                a(Paragraph('I should never be at the bottom of a frame (%d)' % len(story), h3))
                a(Paragraph(randomText(theme=PYTHON, sentences=1)+' (%d)' % len(story), bt))
    # <br/> handling in each alignment, with and without borders.
    for align,bts in [('left',bt),('JUSTIFIED',btj),('RIGHT',btr),('CENTER',btc)]:
        a(Paragraph('Now we do <br/> tests(align=%s)' % align, h1))
        a(Paragraph('First off no br tags',h3))
        a(Paragraph(_text1,bts))
        a(Paragraph("<br/> after 'the' in line 4",h3))
        a(Paragraph(_text1.replace('forms of the','forms of the<br/>',1),bts))
        a(Paragraph("2*<br/> after 'the' in line 4",h3))
        a(Paragraph(_text1.replace('forms of the','forms of the<br/><br/>',1),bts))
        a(Paragraph("<br/> after 'I suggested ' in line 5",h3))
        a(Paragraph(_text1.replace('I suggested ','I suggested<br/>',1),bts))
        a(Paragraph("2*<br/> after 'I suggested ' in line 5",h3))
        a(Paragraph(_text1.replace('I suggested ','I suggested<br/><br/>',1),bts))
        a(Paragraph("<br/> at the end of the paragraph!",h3))
        a(Paragraph("""text one<br/>text two<br/>""",bts))
        a(Paragraph("Border with <br/> at the end of the paragraph!",h3))
        bt1 = ParagraphStyle('bodyText1',bts)
        bt1.borderWidth = 0.5
        bt1.borderColor = colors.toColor('red')
        bt1.backColor = colors.pink
        bt1.borderRadius = 2
        bt1.borderPadding = 3
        a(Paragraph("""text one<br/>text two<br/>""",bt1))
        a(Paragraph("Border no <br/> at the end of the paragraph!",h3))
        bt1 = ParagraphStyle('bodyText1',bts)
        bt1.borderWidth = 0.5
        bt1.borderColor = colors.toColor('red')
        bt1.backColor = colors.pink
        bt1.borderRadius = 2
        bt1.borderPadding = 3
        a(Paragraph("""text one<br/>text two""",bt1))
        a(Paragraph("Different border style!",h3))
        bt2 = ParagraphStyle('bodyText1',bt1)
        bt2.borderWidth = 1.5
        bt2.borderColor = colors.toColor('blue')
        bt2.backColor = colors.gray
        bt2.borderRadius = 3
        bt2.borderPadding = 3
        a(Paragraph("""text one<br/>text two<br/>""",bt2))
    # Long hyperlinks that must survive being split across lines.
    for i in 0, 1, 2:
        P = Paragraph("""This is a paragraph with <font color='blue'><a href='#top'>with an incredibly
        long and boring link in side of it that
        contains lots and lots of stupidly boring and worthless information.
        So that we can split the link and see if we get problems like Dinu's.
        I hope we don't, but you never do Know.</a></font>""",bt)
        a(P)
    doc = MyDocTemplate(outputfile('test_platypus_breaking.pdf'))
    # multiBuild resolves cross-references (may lay out more than once).
    doc.multiBuild(story)
class BreakingTestCase(unittest.TestCase):
    "Test multi-page splitting of paragraphs (eyeball-test)."
    def test0(self):
        # Full eyeball test; see module-level _test0.
        _test0(self)
    def test1(self):
        '''Ilpo Nyyss\xf6nen posted this broken test'''
        # keepWithNext followed by an Indenter used to crash the builder.
        normalStyle = ParagraphStyle(name = 'normal')
        keepStyle = ParagraphStyle(name = 'keep', keepWithNext = True)
        content = [
            Paragraph("line 1", keepStyle),
            Indenter(left = 1 * cm),
            Paragraph("line 2", normalStyle),
            ]
        doc = SimpleDocTemplate(outputfile('test_platypus_breaking1.pdf'))
        doc.build(content)
    def test2(self):
        # Widow/orphan control: split() returns [] when a split would
        # violate the allowWidows/allowOrphans settings.
        sty = ParagraphStyle(name = 'normal')
        sty.fontName = 'Times-Roman'
        sty.fontSize = 10
        sty.leading = 12
        p = Paragraph('one two three',sty)
        p.wrap(20,36)
        self.assertEqual(len(p.split(20,24)),2)    #widows allowed
        self.assertEqual(len(p.split(20,16)),0)    #orphans disallowed
        p.allowWidows = 0
        self.assertEqual(len(p.split(20,24)),0)    #widows disallowed
        p.allowOrphans = 1
        self.assertEqual(len(p.split(20,16)),2)    #orphans allowed
    def test3(self):
        # Golden test: justified paragraph must emit exactly this PDF
        # operator stream (modulo the paraFontSizeHeightOffset setting).
        from reportlab.pdfgen.canvas import Canvas
        aW=307
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        btj = ParagraphStyle('bodyText1j',parent=bt,alignment=TA_JUSTIFY)
        p=Paragraph("""<a name='top'/>Subsequent pages test pageBreakBefore, frameBreakBefore and
        keepTogether attributes. Generated at 1111. The number in brackets
        at the end of each paragraph is its position in the story. llllllllllllllllllllllllll
        bbbbbbbbbbbbbbbbbbbbbb ccccccccccccccccccccccc ddddddddddddddddddddd eeeeyyy""",btj)
        w,h=p.wrap(aW,1000)
        canv=Canvas('test_platypus_paragraph_just.pdf',pagesize=(aW,h))
        i=len(canv._code)
        p.drawOn(canv,0,0)
        ParaCode=canv._code[i:]
        canv.saveState()
        canv.setLineWidth(0)
        canv.setStrokeColorRGB(1,0,0)
        canv.rect(0,0,aW,h)
        canv.restoreState()
        canv.showPage()
        canv.save()
        from reportlab import rl_config
        x = rl_config.paraFontSizeHeightOffset and '50' or '53.17'
        good = ['q', '1 0 0 1 0 0 cm', 'q', 'BT 1 0 0 1 0 '+x+' Tm 3.59 Tw 12 TL /F1 10 Tf 0 0 0 rg (Subsequent pages test pageBreakBefore, frameBreakBefore and) Tj T* 0 Tw .23 Tw (keepTogether attributes. Generated at 1111. The number in brackets) Tj T* 0 Tw .299167 Tw (at the end of each paragraph is its position in the story. llllllllllllllllllllllllll) Tj T* 0 Tw 66.9 Tw (bbbbbbbbbbbbbbbbbbbbbb ccccccccccccccccccccccc) Tj T* 0 Tw (ddddddddddddddddddddd eeeeyyy) Tj T* ET', 'Q', 'Q']
        ok= ParaCode==good
        assert ok, "\nParaCode=%r\nexpected=%r" % (ParaCode,good)
def makeSuite():
    # Build the unittest suite for this module's single test case.
    return makeSuiteForClasses(BreakingTestCase)
#noruntests
# Run the suite directly, or call _test0 alone when 'debug' is passed.
if __name__ == "__main__": #NORUNTESTS
    if 'debug' in sys.argv:
        _test0(None)
    else:
        unittest.TextTestRunner().run(makeSuite())
        printLocation()
| mit |
rwboyer/marilyn-project | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/formatters/_mapping.py | 263 | 5508 | # -*- coding: utf-8 -*-
"""
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter mapping defintions. This file is generated by itself. Everytime
you change something on a builtin formatter defintion, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
from pygments.formatters.img import BmpImageFormatter
from pygments.formatters.img import GifImageFormatter
from pygments.formatters.img import ImageFormatter
from pygments.formatters.img import JpgImageFormatter
from pygments.formatters.latex import LatexFormatter
from pygments.formatters.other import NullFormatter
from pygments.formatters.other import RawTokenFormatter
from pygments.formatters.rtf import RtfFormatter
from pygments.formatters.svg import SvgFormatter
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
# Maps formatter class -> (display name, aliases, filename patterns,
# docstring headline).  Regenerated by the __main__ block below;
# do not edit entries by hand.
FORMATTERS = {
    BBCodeFormatter: ('BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
    BmpImageFormatter: ('img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    GifImageFormatter: ('img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    HtmlFormatter: ('HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
    ImageFormatter: ('img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    JpgImageFormatter: ('img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    LatexFormatter: ('LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
    NullFormatter: ('Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
    RawTokenFormatter: ('Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
    RtfFormatter: ('RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft\xc2\xae Word\xc2\xae documents.'),
    SvgFormatter: ('SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
    Terminal256Formatter: ('Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    TerminalFormatter: ('Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.')
}
# Self-regeneration: scan the formatters package, rebuild the import
# list and FORMATTERS dict, and rewrite everything between the
# '# start' marker and this __main__ block.  (Python 2 syntax.)
if __name__ == '__main__':
    import sys
    import os
    # lookup formatters
    found_formatters = []
    imports = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    from pygments.util import docstring_headline
    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.formatters.%s' % filename[:-3]
            print module_name
            module = __import__(module_name, None, None, [''])
            # Each formatter module declares its public classes in __all__.
            for formatter_name in module.__all__:
                imports.append((module_name, formatter_name))
                formatter = getattr(module, formatter_name)
                found_formatters.append(
                    '%s: %r' % (formatter_name,
                    (formatter.name,
                    tuple(formatter.aliases),
                    tuple(formatter.filenames),
                    docstring_headline(formatter))))
    # sort them, that should make the diff files for svn smaller
    found_formatters.sort()
    imports.sort()
    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    header = content[:content.find('# start')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    f = open(__file__, 'w')
    f.write(header)
    f.write('# start\n')
    f.write('\n'.join(['from %s import %s' % imp for imp in imports]))
    f.write('\n\n')
    f.write('FORMATTERS = {\n    %s\n}\n\n' % ',\n    '.join(found_formatters))
    f.write(footer)
    f.close()
| mit |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/openerp/addons/base/tests/test_api.py | 59 | 17630 |
from openerp import models
from openerp.tools import mute_logger
from openerp.osv.orm import except_orm
from openerp.tests import common
class TestAPI(common.TransactionCase):
""" test the new API of the ORM """
    def assertIsRecordset(self, value, model):
        # Custom assertion: value is a recordset of the given model name.
        self.assertIsInstance(value, models.BaseModel)
        self.assertEqual(value._name, model)
    def assertIsRecord(self, value, model):
        # Custom assertion: a recordset holding at most one record.
        self.assertIsRecordset(value, model)
        self.assertTrue(len(value) <= 1)
    def assertIsNull(self, value, model):
        # Custom assertion: an empty (falsy) recordset of the model.
        self.assertIsRecordset(value, model)
        self.assertFalse(value)
    @mute_logger('openerp.models')
    def test_00_query(self):
        """ Build a recordset, and check its contents. """
        domain = [('name', 'ilike', 'j')]
        # Old API (registry/cr/uid) and new API must return the same ids.
        ids = self.registry('res.partner').search(self.cr, self.uid, domain)
        partners = self.env['res.partner'].search(domain)
        # partners is a collection of browse records corresponding to ids
        self.assertTrue(ids)
        self.assertTrue(partners)
        # partners and its contents are instance of the model, and share its ormcache
        self.assertIsRecordset(partners, 'res.partner')
        self.assertIs(partners._ormcache, self.env['res.partner']._ormcache)
        for p in partners:
            self.assertIsRecord(p, 'res.partner')
            self.assertIs(p._ormcache, self.env['res.partner']._ormcache)
        self.assertEqual([p.id for p in partners], ids)
        self.assertEqual(self.env['res.partner'].browse(ids), partners)
    @mute_logger('openerp.models')
    def test_01_query_offset(self):
        """ Build a recordset with offset, and check equivalence. """
        # search(offset=N) must equal slicing the full result with [N:].
        partners1 = self.env['res.partner'].search([], offset=10)
        partners2 = self.env['res.partner'].search([])[10:]
        self.assertIsRecordset(partners1, 'res.partner')
        self.assertIsRecordset(partners2, 'res.partner')
        self.assertEqual(list(partners1), list(partners2))
    @mute_logger('openerp.models')
    def test_02_query_limit(self):
        """ Build a recordset with offset, and check equivalence. """
        # search(limit=N) must equal slicing the full result with [:N].
        partners1 = self.env['res.partner'].search([], limit=10)
        partners2 = self.env['res.partner'].search([])[:10]
        self.assertIsRecordset(partners1, 'res.partner')
        self.assertIsRecordset(partners2, 'res.partner')
        self.assertEqual(list(partners1), list(partners2))
    @mute_logger('openerp.models')
    def test_03_query_offset_limit(self):
        """ Build a recordset with offset and limit, and check equivalence. """
        # Combined offset+limit must equal the [offset:offset+limit] slice.
        partners1 = self.env['res.partner'].search([], offset=3, limit=7)
        partners2 = self.env['res.partner'].search([])[3:10]
        self.assertIsRecordset(partners1, 'res.partner')
        self.assertIsRecordset(partners2, 'res.partner')
        self.assertEqual(list(partners1), list(partners2))
    @mute_logger('openerp.models')
    def test_05_immutable(self):
        """ Check that a recordset remains the same, even after updates. """
        # A recordset is a frozen list of ids, not a live query.
        domain = [('name', 'ilike', 'j')]
        partners = self.env['res.partner'].search(domain)
        self.assertTrue(partners)
        ids = map(int, partners)
        # modify those partners, and check that partners has not changed
        self.registry('res.partner').write(self.cr, self.uid, ids, {'active': False})
        self.assertEqual(ids, map(int, partners))
        # redo the search, and check that the result is now empty
        partners2 = self.env['res.partner'].search(domain)
        self.assertFalse(partners2)
    @mute_logger('openerp.models')
    def test_06_fields(self):
        """ Check that relation fields return records, recordsets or nulls. """
        user = self.registry('res.users').browse(self.cr, self.uid, self.uid)
        self.assertIsRecord(user, 'res.users')
        self.assertIsRecord(user.partner_id, 'res.partner')
        self.assertIsRecordset(user.groups_id, 'res.groups')
        partners = self.env['res.partner'].search([])
        # Every relational column must yield the right recordset type.
        for name, cinfo in partners._all_columns.iteritems():
            if cinfo.column._type == 'many2one':
                for p in partners:
                    self.assertIsRecord(p[name], cinfo.column._obj)
            elif cinfo.column._type == 'reference':
                for p in partners:
                    if p[name]:
                        self.assertIsRecord(p[name], cinfo.column._obj)
            elif cinfo.column._type in ('one2many', 'many2many'):
                for p in partners:
                    self.assertIsRecordset(p[name], cinfo.column._obj)
    @mute_logger('openerp.models')
    def test_07_null(self):
        """ Check behavior of null instances. """
        # select a partner without a parent
        partner = self.env['res.partner'].search([('parent_id', '=', False)])[0]
        # check partner and related null instances
        self.assertTrue(partner)
        self.assertIsRecord(partner, 'res.partner')
        # Traversing relations through a null record keeps yielding
        # falsy null records/fields instead of raising.
        self.assertFalse(partner.parent_id)
        self.assertIsNull(partner.parent_id, 'res.partner')
        self.assertIs(partner.parent_id.id, False)
        self.assertFalse(partner.parent_id.user_id)
        self.assertIsNull(partner.parent_id.user_id, 'res.users')
        self.assertIs(partner.parent_id.user_id.name, False)
        self.assertFalse(partner.parent_id.user_id.groups_id)
        self.assertIsRecordset(partner.parent_id.user_id.groups_id, 'res.groups')
    @mute_logger('openerp.models')
    def test_10_old_old(self):
        """ Call old-style methods in the old-fashioned way. """
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertTrue(partners)
        ids = map(int, partners)
        # call method name_get on partners' model, and check its effect
        res = partners._model.name_get(self.cr, self.uid, ids)
        self.assertEqual(len(res), len(ids))
        self.assertEqual(set(val[0] for val in res), set(ids))
    @mute_logger('openerp.models')
    def test_20_old_new(self):
        """ Call old-style methods in the new API style. """
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertTrue(partners)
        # call method name_get on partners itself, and check its effect
        # (cr/uid/ids are injected automatically by the API bridge).
        res = partners.name_get()
        self.assertEqual(len(res), len(partners))
        self.assertEqual(set(val[0] for val in res), set(map(int, partners)))
    @mute_logger('openerp.models')
    def test_25_old_new(self):
        """ Call old-style methods on records (new API style). """
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertTrue(partners)
        # call method name_get on partner records, and check its effect
        # (each single record yields a one-element result list).
        for p in partners:
            res = p.name_get()
            self.assertTrue(isinstance(res, list) and len(res) == 1)
            self.assertTrue(isinstance(res[0], tuple) and len(res[0]) == 2)
            self.assertEqual(res[0][0], p.id)
    @mute_logger('openerp.models')
    def test_30_new_old(self):
        """ Call new-style methods in the old-fashioned way. """
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertTrue(partners)
        ids = map(int, partners)
        # call method write on partners' model, and check its effect
        # (record cache must reflect the old-API write).
        partners._model.write(self.cr, self.uid, ids, {'active': False})
        for p in partners:
            self.assertFalse(p.active)
    @mute_logger('openerp.models')
    def test_40_new_new(self):
        """ Call new-style methods in the new API style. """
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertTrue(partners)
        # call method write on partners itself, and check its effect
        partners.write({'active': False})
        for p in partners:
            self.assertFalse(p.active)
    @mute_logger('openerp.models')
    def test_45_new_new(self):
        """ Call new-style methods on records (new API style). """
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertTrue(partners)
        # call method write on partner records, and check its effects
        # (per-record writes behave like the recordset-level write).
        for p in partners:
            p.write({'active': False})
        for p in partners:
            self.assertFalse(p.active)
    @mute_logger('openerp.models')
    @mute_logger('openerp.addons.base.ir.ir_model')
    def test_50_environment(self):
        """ Test environment on records. """
        # partners and reachable records are attached to self.env
        partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
        self.assertEqual(partners.env, self.env)
        for x in (partners, partners[0], partners[0].company_id):
            self.assertEqual(x.env, self.env)
        for p in partners:
            self.assertEqual(p.env, self.env)
        # check that the current user can read and modify company data
        partners[0].company_id.name
        partners[0].company_id.write({'name': 'Fools'})
        # create an environment with the demo user
        demo = self.env['res.users'].search([('login', '=', 'demo')])[0]
        demo_env = self.env(user=demo)
        self.assertNotEqual(demo_env, self.env)
        # partners and related records are still attached to self.env
        self.assertEqual(partners.env, self.env)
        for x in (partners, partners[0], partners[0].company_id):
            self.assertEqual(x.env, self.env)
        for p in partners:
            self.assertEqual(p.env, self.env)
        # create record instances attached to demo_env
        # (sudo() re-attaches records to another user's environment)
        demo_partners = partners.sudo(demo)
        self.assertEqual(demo_partners.env, demo_env)
        for x in (demo_partners, demo_partners[0], demo_partners[0].company_id):
            self.assertEqual(x.env, demo_env)
        for p in demo_partners:
            self.assertEqual(p.env, demo_env)
        # demo user can read but not modify company data
        demo_partners[0].company_id.name
        with self.assertRaises(except_orm):
            demo_partners[0].company_id.write({'name': 'Pricks'})
        # remove demo user from all groups
        demo.write({'groups_id': [(5,)]})
        # demo user can no longer access partner data
        with self.assertRaises(except_orm):
            demo_partners[0].company_id.name
    @mute_logger('openerp.models')
    def test_55_draft(self):
        """ Test draft mode nesting. """
        # do_in_draft() is a reentrant context manager: in_draft stays
        # True through nesting and reverts only when the outermost exits.
        env = self.env
        self.assertFalse(env.in_draft)
        with env.do_in_draft():
            self.assertTrue(env.in_draft)
            with env.do_in_draft():
                self.assertTrue(env.in_draft)
                with env.do_in_draft():
                    self.assertTrue(env.in_draft)
                self.assertTrue(env.in_draft)
            self.assertTrue(env.in_draft)
        self.assertFalse(env.in_draft)
    @mute_logger('openerp.models')
    def test_60_cache(self):
        """ Check the record cache behavior """
        partners = self.env['res.partner'].search([('child_ids', '!=', False)])
        partner1, partner2 = partners[0], partners[1]
        children1, children2 = partner1.child_ids, partner2.child_ids
        self.assertTrue(children1)
        self.assertTrue(children2)
        # take a child contact
        child = children1[0]
        self.assertEqual(child.parent_id, partner1)
        self.assertIn(child, partner1.child_ids)
        self.assertNotIn(child, partner2.child_ids)
        # fetch data in the cache
        for p in partners:
            p.name, p.company_id.name, p.user_id.name, p.contact_address
        self.env.check_cache()
        # change its parent
        # (write must invalidate/update both sides of the o2m relation)
        child.write({'parent_id': partner2.id})
        self.env.check_cache()
        # check recordsets
        self.assertEqual(child.parent_id, partner2)
        self.assertNotIn(child, partner1.child_ids)
        self.assertIn(child, partner2.child_ids)
        self.assertEqual(set(partner1.child_ids + child), set(children1))
        self.assertEqual(set(partner2.child_ids), set(children2 + child))
        self.env.check_cache()
        # delete it
        child.unlink()
        self.env.check_cache()
        # check recordsets
        self.assertEqual(set(partner1.child_ids), set(children1) - set([child]))
        self.assertEqual(set(partner2.child_ids), set(children2))
        self.env.check_cache()
    @mute_logger('openerp.models')
    def test_60_cache_prefetching(self):
        """ Check the record cache prefetching """
        self.env.invalidate_all()
        # all the records of an instance already have an entry in cache
        partners = self.env['res.partner'].search([])
        partner_ids = self.env.prefetch['res.partner']
        self.assertEqual(set(partners.ids), set(partner_ids))
        # countries have not been fetched yet; their cache must be empty
        countries = self.env['res.country'].browse()
        self.assertFalse(self.env.prefetch['res.country'])
        # reading ONE partner should fetch them ALL
        # (prefetching loads the field for every record in the set)
        countries |= partners[0].country_id
        country_cache = self.env.cache[partners._fields['country_id']]
        self.assertLessEqual(set(partners._ids), set(country_cache))
        # read all partners, and check that the cache already contained them
        country_ids = list(self.env.prefetch['res.country'])
        for p in partners:
            countries |= p.country_id
        self.assertLessEqual(set(countries.ids), set(country_ids))
    @mute_logger('openerp.models')
    def test_70_one(self):
        """ Check method one(). """
        # check with many records
        # (ensure_one raises on both multi-record and empty recordsets)
        ps = self.env['res.partner'].search([('name', 'ilike', 'a')])
        self.assertTrue(len(ps) > 1)
        with self.assertRaises(except_orm):
            ps.ensure_one()
        p1 = ps[0]
        self.assertEqual(len(p1), 1)
        self.assertEqual(p1.ensure_one(), p1)
        p0 = self.env['res.partner'].browse()
        self.assertEqual(len(p0), 0)
        with self.assertRaises(except_orm):
            p0.ensure_one()
    @mute_logger('openerp.models')
    def test_80_contains(self):
        """ Test membership on recordset. """
        # `record in recordset` must behave like set membership.
        p1 = self.env['res.partner'].search([('name', 'ilike', 'a')], limit=1).ensure_one()
        ps = self.env['res.partner'].search([('name', 'ilike', 'a')])
        self.assertTrue(p1 in ps)
    @mute_logger('openerp.models')
    def test_80_set_operations(self):
        """ Check set operations on recordsets. """
        pa = self.env['res.partner'].search([('name', 'ilike', 'a')])
        pb = self.env['res.partner'].search([('name', 'ilike', 'b')])
        self.assertTrue(pa)
        self.assertTrue(pb)
        self.assertTrue(set(pa) & set(pb))
        # + concatenates (may keep duplicates); -, &, | behave like sets.
        concat = pa + pb
        self.assertEqual(list(concat), list(pa) + list(pb))
        self.assertEqual(len(concat), len(pa) + len(pb))
        difference = pa - pb
        self.assertEqual(len(difference), len(set(difference)))
        self.assertEqual(set(difference), set(pa) - set(pb))
        self.assertLessEqual(difference, pa)
        intersection = pa & pb
        self.assertEqual(len(intersection), len(set(intersection)))
        self.assertEqual(set(intersection), set(pa) & set(pb))
        self.assertLessEqual(intersection, pa)
        self.assertLessEqual(intersection, pb)
        union = pa | pb
        self.assertEqual(len(union), len(set(union)))
        self.assertEqual(set(union), set(pa) | set(pb))
        self.assertGreaterEqual(union, pa)
        self.assertGreaterEqual(union, pb)
        # one cannot mix different models with set operations
        ps = pa
        ms = self.env['ir.ui.menu'].search([])
        self.assertNotEqual(ps._name, ms._name)
        self.assertNotEqual(ps, ms)
        with self.assertRaises(except_orm):
            res = ps + ms
        with self.assertRaises(except_orm):
            res = ps - ms
        with self.assertRaises(except_orm):
            res = ps & ms
        with self.assertRaises(except_orm):
            res = ps | ms
        with self.assertRaises(except_orm):
            res = ps < ms
        with self.assertRaises(except_orm):
            res = ps <= ms
        with self.assertRaises(except_orm):
            res = ps > ms
        with self.assertRaises(except_orm):
            res = ps >= ms
@mute_logger('openerp.models')
def test_80_filter(self):
    """Check filtered() on recordsets, by callable and by field name."""
    partners = self.env['res.partner'].search([])
    expected_ids = [p.id for p in partners if p.customer]
    customers = partners.browse(expected_ids)
    # filter on a single field
    self.assertEqual(partners.filtered(lambda p: p.customer), customers)
    self.assertEqual(partners.filtered('customer'), customers)
    # filter on a dotted sequence of fields
    self.assertEqual(
        partners.filtered(lambda p: p.parent_id.customer),
        partners.filtered('parent_id.customer'),
    )
@mute_logger('openerp.models')
def test_80_map(self):
    """Check mapped() on recordsets, by callable and by field name."""
    partners = self.env['res.partner'].search([])
    # union of all parents, computed record by record
    parents = partners.browse()
    for partner in partners:
        parents |= partner.parent_id
    # map a single field
    self.assertEqual(partners.mapped(lambda p: p.parent_id), parents)
    self.assertEqual(partners.mapped('parent_id'), parents)
    # map a dotted sequence of fields
    self.assertEqual(
        partners.mapped(lambda p: p.parent_id.name),
        [p.parent_id.name for p in partners],
    )
    self.assertEqual(
        partners.mapped('parent_id.name'),
        [p.name for p in parents],
    )
| agpl-3.0 |
haad/ansible | lib/ansible/modules/network/enos/enos_facts.py | 14 | 15199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Collect facts from Lenovo Switches running Lenovo ENOS commands
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: enos_facts
version_added: "2.5"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Collect facts from remote devices running Lenovo ENOS
description:
- Collects a base set of device facts from a remote Lenovo device
running on ENOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: enos
notes:
- Tested against ENOS 8.4.1.68
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
'''
EXAMPLES = '''
Tasks: The following are examples of using the module enos_facts.
---
- name: Test Enos Facts
enos_facts:
provider={{ cli }}
vars:
cli:
host: "{{ inventory_hostname }}"
port: 22
username: admin
password: admin
transport: cli
timeout: 30
authorize: True
auth_pass:
---
# Collect all facts from the device
- enos_facts:
gather_subset: all
provider: "{{ cli }}"
# Collect only the config and default facts
- enos_facts:
gather_subset:
- config
provider: "{{ cli }}"
# Do not collect hardware facts
- enos_facts:
gather_subset:
- "!hardware"
provider: "{{ cli }}"
'''
RETURN = '''
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the Lenovo ENOS device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the Lenovo ENOS device
returned: always
type: str
ansible_net_version:
description: The ENOS operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: Indicates the active image for the device
returned: always
type: string
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in MB
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system.
This gives information on description, mac address, mtu, speed,
duplex and operstatus
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
'''
import re
from ansible.module_utils.network.enos.enos import run_commands, enos_argument_spec, check_args
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
    """Base class for ENOS fact collectors.

    Subclasses declare the CLI commands to execute in ``COMMANDS`` and
    override ``populate()`` to parse ``self.responses`` into ``self.facts``.
    """

    COMMANDS = []

    def __init__(self, module):
        # AnsibleModule instance used to talk to the device.
        self.module = module
        # Parsed facts, filled in by populate().
        self.facts = {}
        # Raw command output, filled in by populate().
        self.responses = None
        self.PERSISTENT_COMMAND_TIMEOUT = 60

    def populate(self):
        """Run the class-level COMMANDS and keep their raw responses."""
        self.responses = run_commands(self.module, self.COMMANDS,
                                      check_rc=False)

    def run(self, cmd):
        """Run an ad-hoc command list and return its responses."""
        return run_commands(self.module, cmd, check_rc=False)
class Default(FactsBase):
    """Collect the always-gathered facts (version, serial number, model,
    hostname, active image) from 'show version' and 'show run'."""

    COMMANDS = ['show version', 'show run']

    def populate(self):
        """Run COMMANDS (via FactsBase) and fill self.facts from the output."""
        super(Default, self).populate()
        data = self.responses[0]       # 'show version' output
        data_run = self.responses[1]   # 'show run' output
        if data:
            self.facts['version'] = self.parse_version(data)
            self.facts['serialnum'] = self.parse_serialnum(data)
            self.facts['model'] = self.parse_model(data)
            self.facts['image'] = self.parse_image(data)
        if data_run:
            self.facts['hostname'] = self.parse_hostname(data_run)

    def parse_version(self, data):
        """Return the software version string, or None if not found."""
        match = re.search(r'^Software Version (.*?) ', data, re.M | re.I)
        if match:
            return match.group(1)

    def parse_hostname(self, data_run):
        """Return the configured hostname from running-config, or "NA"."""
        for line in data_run.split('\n'):
            line = line.strip()
            match = re.match(r'hostname (.*?)', line, re.M | re.I)
            if match:
                # NOTE(review): the regex group is never used -- the hostname
                # is taken from the second whitespace-separated token, with
                # surrounding double quotes stripped.
                hosts = line.split()
                hostname = hosts[1].strip('\"')
                return hostname
        return "NA"

    def parse_model(self, data):
        """Return the switch model identifier, or None if not found."""
        # match = re.search(r'^Cisco (.+) \(revision', data, re.M)
        match = re.search(r'^Lenovo RackSwitch (\S+)', data, re.M | re.I)
        if match:
            return match.group(1)

    def parse_image(self, data):
        """Return 'Image1' if image1 is mentioned in the output, else 'Image2'."""
        match = re.search(r'(.*) image1(.*)', data, re.M | re.I)
        if match:
            return "Image1"
        else:
            return "Image2"

    def parse_serialnum(self, data):
        """Return the switch serial number, or None if not found."""
        # match = re.search(r'board ID (\S+)', data)
        match = re.search(r'^Switch Serial No: (\S+)', data, re.M | re.I)
        if match:
            return match.group(1)
class Hardware(FactsBase):
    """Collect memory facts from 'show system memory'."""

    COMMANDS = [
        'show system memory'
    ]

    def populate(self):
        """Populate memtotal_mb / memfree_mb facts."""
        super(Hardware, self).populate()
        # NOTE(review): this re-runs the command already executed by the
        # super() call above -- confirm whether self.responses[0] could be
        # used instead to avoid a second device round trip.
        data = self.run(['show system memory'])
        # run() returns a list of responses; to_text() renders it via repr,
        # leaving literal '\n' sequences which are restored to real newlines
        # so the ^-anchored multiline regexes below can match per line.
        data = to_text(data, errors='surrogate_or_strict').strip()
        data = data.replace(r"\n", "\n")
        if data:
            self.facts['memtotal_mb'] = self.parse_memtotal(data)
            self.facts['memfree_mb'] = self.parse_memfree(data)

    def parse_memtotal(self, data):
        """Return total memory in MB as an int, or None if not found."""
        match = re.search(r'^MemTotal:\s*(.*) kB', data, re.M | re.I)
        if match:
            # Floor division: the documented fact type is int, and true
            # division (enabled by the __future__ import at the top of this
            # module) would otherwise return a float.
            return int(match.group(1)) // 1024

    def parse_memfree(self, data):
        """Return free memory in MB as an int, or None if not found."""
        match = re.search(r'^MemFree:\s*(.*) kB', data, re.M | re.I)
        if match:
            return int(match.group(1)) // 1024
class Config(FactsBase):
    """Collect the device running configuration."""

    COMMANDS = ['show running-config']

    def populate(self):
        """Store the raw running-config output under the 'config' fact."""
        super(Config, self).populate()
        output = self.responses[0]
        if output:
            self.facts['config'] = output
class Interfaces(FactsBase):
    """Collect interface status, LLDP neighbor and IP address facts.

    Parsing is column/index based on the tabular CLI output of
    'show interface status', 'show lldp port', 'show lldp remote-device port'
    and 'show interface ip'.
    """

    COMMANDS = ['show interface status']

    def populate(self):
        """Run the interface-related show commands and fill self.facts."""
        super(Interfaces, self).populate()
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()
        # run() returns a list; to_text() renders it via repr, leaving literal
        # '\n' sequences which the replace() below restores to real newlines.
        data1 = self.run(['show interface status'])
        data1 = to_text(data1, errors='surrogate_or_strict').strip()
        data1 = data1.replace(r"\n", "\n")
        data2 = self.run(['show lldp port'])
        data2 = to_text(data2, errors='surrogate_or_strict').strip()
        data2 = data2.replace(r"\n", "\n")
        lines1 = None
        lines2 = None
        if data1:
            lines1 = self.parse_interfaces(data1)
        if data2:
            lines2 = self.parse_interfaces(data2)
        # interface facts need both outputs (they are zipped row by row)
        if lines1 is not None and lines2 is not None:
            self.facts['interfaces'] = self.populate_interfaces(lines1, lines2)
        data3 = self.run(['show lldp remote-device port'])
        data3 = to_text(data3, errors='surrogate_or_strict').strip()
        data3 = data3.replace(r"\n", "\n")
        lines3 = None
        if data3:
            lines3 = self.parse_neighbors(data3)
        if lines3 is not None:
            self.facts['neighbors'] = self.populate_neighbors(lines3)
        data4 = self.run(['show interface ip'])
        data4 = data4[0].split('\n')
        lines4 = None
        if data4:
            lines4 = self.parse_ipaddresses(data4)
            ipv4_interfaces = self.set_ipv4_interfaces(lines4)
            self.facts['all_ipv4_addresses'] = ipv4_interfaces
            ipv6_interfaces = self.set_ipv6_interfaces(lines4)
            self.facts['all_ipv6_addresses'] = ipv6_interfaces

    def parse_ipaddresses(self, data4):
        """Return the lines of 'show interface ip' mentioning IP4 or IP6."""
        parsed = list()
        for line in data4:
            if len(line) == 0:
                continue
            else:
                line = line.strip()
                if len(line) == 0:
                    continue
                match = re.search(r'IP4', line, re.M | re.I)
                if match:
                    # NOTE(review): `key` is assigned but never used.
                    key = match.group()
                    parsed.append(line)
                match = re.search(r'IP6', line, re.M | re.I)
                if match:
                    key = match.group()
                    parsed.append(line)
        return parsed

    def set_ipv4_interfaces(self, line4):
        """Return IPv4 addresses from the pre-filtered IP lines.

        Assumes each line splits as <intf> <family> <address> ... --
        TODO confirm against actual 'show interface ip' output.
        """
        ipv4_addresses = list()
        for line in line4:
            ipv4Split = line.split()
            if ipv4Split[1] == "IP4":
                ipv4_addresses.append(ipv4Split[2])
        return ipv4_addresses

    def set_ipv6_interfaces(self, line4):
        """Return IPv6 addresses from the pre-filtered IP lines
        (same column layout assumption as set_ipv4_interfaces)."""
        ipv6_addresses = list()
        for line in line4:
            ipv6Split = line.split()
            if ipv6Split[1] == "IP6":
                ipv6_addresses.append(ipv6Split[2])
        return ipv6_addresses

    def populate_neighbors(self, lines3):
        """Build the LLDP neighbors dict from '|'-separated table rows."""
        neighbors = dict()
        for line in lines3:
            neighborSplit = line.split("|")
            innerData = dict()
            innerData['Remote Chassis ID'] = neighborSplit[2].strip()
            innerData['Remote Port'] = neighborSplit[3].strip()
            sysName = neighborSplit[4].strip()
            # NOTE(review): str.strip() never returns None, so this condition
            # is always true and the "NA" branch is unreachable.
            if sysName is not None:
                innerData['Remote System Name'] = neighborSplit[4].strip()
            else:
                innerData['Remote System Name'] = "NA"
            neighbors[neighborSplit[0].strip()] = innerData
        return neighbors

    def populate_interfaces(self, lines1, lines2):
        """Merge matching rows of the two show outputs into one dict per port.

        Column indices assume fixed tabular layouts of 'show interface
        status' and 'show lldp port' -- TODO confirm against device output.
        """
        interfaces = dict()
        for line1, line2 in zip(lines1, lines2):
            line = line1 + " " + line2
            intfSplit = line.split()
            innerData = dict()
            innerData['description'] = intfSplit[6].strip()
            innerData['macaddress'] = intfSplit[8].strip()
            innerData['mtu'] = intfSplit[9].strip()
            innerData['speed'] = intfSplit[1].strip()
            innerData['duplex'] = intfSplit[2].strip()
            innerData['operstatus'] = intfSplit[5].strip()
            interfaces[intfSplit[0].strip()] = innerData
        return interfaces

    def parse_neighbors(self, neighbors):
        """Keep only data rows: lines starting with a port number or 'MGT'.

        NOTE(review): identical to parse_interfaces(); could be shared.
        """
        parsed = list()
        for line in neighbors.split('\n'):
            if len(line) == 0:
                continue
            else:
                line = line.strip()
                match = re.match(r'^([0-9]+)', line)
                if match:
                    key = match.group(1)
                    parsed.append(line)
                match = re.match(r'^(MGT+)', line)
                if match:
                    key = match.group(1)
                    parsed.append(line)
        return parsed

    def parse_interfaces(self, data):
        """Keep only data rows: lines starting with a port number or 'MGT'."""
        parsed = list()
        for line in data.split('\n'):
            if len(line) == 0:
                continue
            else:
                line = line.strip()
                match = re.match(r'^([0-9]+)', line)
                if match:
                    key = match.group(1)
                    parsed.append(line)
                match = re.match(r'^(MGT+)', line)
                if match:
                    key = match.group(1)
                    parsed.append(line)
        return parsed
# Map each gather_subset name to the FactsBase subclass that collects it.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

# Acceptable values for the 'gather_subset' option (besides 'all' / '!...').
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())

# NOTE(review): this module-level constant appears unused here --
# FactsBase.__init__ sets its own PERSISTENT_COMMAND_TIMEOUT. Confirm
# before removing.
PERSISTENT_COMMAND_TIMEOUT = 60
def main():
    """Main entry point for module execution.

    Resolves the requested fact subsets, runs each collector, prefixes
    every fact key with 'ansible_net_' and exits with the results.
    """
    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )
    argument_spec.update(enos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    # Split requested subsets into those to run and those to exclude
    # ('!' prefix). 'all' / '!all' expand to every valid subset.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            # Name the offending value and the valid choices so failures
            # are actionable (the previous message was just 'Bad subset').
            module.fail_json(msg='Bad subset: %s. Valid subsets are: %s' %
                             (subset, ', '.join(sorted(VALID_SUBSETS))))

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    # 'default' facts are always collected, even when excluded.
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](module))

    for inst in instances:
        inst.populate()
        facts.update(inst.facts)

    # Prefix all fact keys per Ansible network-module convention.
    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    warnings = list()
    check_args(module, warnings)

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
# Allow direct execution (Ansible normally imports the module and runs main()).
if __name__ == '__main__':
    main()
| gpl-3.0 |
alikins/ansible | test/units/modules/network/aruba/test_aruba_command.py | 57 | 4349 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.aruba import aruba_command
from units.modules.utils import set_module_args
from .aruba_module import TestArubaModule, load_fixture
class TestArubaCommandModule(TestArubaModule):
    """Unit tests for aruba_command, with run_commands mocked out so no
    real device connection is attempted."""

    module = aruba_command

    def setUp(self):
        super(TestArubaCommandModule, self).setUp()
        self.mock_run_commands = patch(
            'ansible.modules.network.aruba.aruba_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestArubaCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            responses = []
            for item in commands:
                # A command may be a plain string or a JSON-encoded dict
                # with a 'command' key.
                try:
                    command = json.loads(item['command'])['command']
                except ValueError:
                    command = item['command']
                fixture = str(command).replace(' ', '_')
                responses.append(load_fixture(fixture))
            return responses

        self.run_commands.side_effect = load_from_file

    def test_aruba_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(
            result['stdout'][0].startswith('Aruba Operating System Software'))

    def test_aruba_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(
            result['stdout'][0].startswith('Aruba Operating System Software'))

    def test_aruba_command_wait_for(self):
        condition = 'result[0] contains "Aruba Operating System Software"'
        set_module_args(dict(commands=['show version'], wait_for=condition))
        self.execute_module()

    def test_aruba_command_wait_for_fails(self):
        condition = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=condition))
        self.execute_module(failed=True)
        # the module retries 10 times by default before failing
        self.assertEqual(self.run_commands.call_count, 10)

    def test_aruba_command_retries(self):
        condition = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=condition,
                             retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_aruba_command_match_any(self):
        conditions = ['result[0] contains "Aruba Operating System Software"',
                      'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=conditions,
                             match='any'))
        self.execute_module()

    def test_aruba_command_match_all(self):
        conditions = ['result[0] contains "Aruba Operating System Software"',
                      'result[0] contains "Aruba Networks"']
        set_module_args(dict(commands=['show version'], wait_for=conditions,
                             match='all'))
        self.execute_module()

    def test_aruba_command_match_all_failure(self):
        conditions = ['result[0] contains "Aruba Operating System Software"',
                      'result[0] contains "test string"']
        set_module_args(dict(commands=['show version', 'show version'],
                             wait_for=conditions, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
paolodedios/tensorflow | tensorflow/python/ops/ragged/ragged_factory_ops.py | 6 | 15316 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for constructing RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# Op to construct a constant RaggedTensor from a nested Python list.
#===============================================================================
@tf_export("ragged.constant")
@dispatch.add_dispatch_support
def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None,
name=None, row_splits_dtype=dtypes.int64):
"""Constructs a constant RaggedTensor from a nested Python list.
Example:
>>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
All scalar values in `pylist` must have the same nesting depth `K`, and the
returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar
values, then `K` is one greater than the maximum depth of empty lists in
`pylist`. All scalar values in `pylist` must be compatible with `dtype`.
Args:
pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
is not a `list`, `tuple` or `np.ndarray` must be a scalar value
compatible with `dtype`.
dtype: The type of elements for the returned `RaggedTensor`. If not
specified, then a default is chosen based on the scalar values in
`pylist`.
ragged_rank: An integer specifying the ragged rank of the returned
`RaggedTensor`. Must be nonnegative and less than `K`. Defaults to
`max(0, K - 1)` if `inner_shape` is not specified. Defaults to
`max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified.
inner_shape: A tuple of integers specifying the shape for individual inner
values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank`
is not specified. If `ragged_rank` is specified, then a default is chosen
based on the contents of `pylist`.
name: A name prefix for the returned tensor (optional).
row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits.
One of `tf.int32` or `tf.int64`.
Returns:
A potentially ragged tensor with rank `K` and the specified `ragged_rank`,
containing the values from `pylist`.
Raises:
ValueError: If the scalar values in `pylist` have inconsistent nesting
depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
"""
def ragged_factory(values, row_splits):
row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype)
return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits,
validate=False)
with ops.name_scope(name, "RaggedConstant"):
return _constant_value(ragged_factory, constant_op.constant, pylist, dtype,
ragged_rank, inner_shape)
@tf_export(v1=["ragged.constant_value"])
@dispatch.add_dispatch_support
def constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None,
row_splits_dtype="int64"):
"""Constructs a RaggedTensorValue from a nested Python list.
Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`.
If you wish to construct a constant `RaggedTensor`, use
[`ragged.constant(...)`](constant.md) instead.
Example:
>>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]])
tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]),
row_splits=array([0, 2, 3, 6]))
All scalar values in `pylist` must have the same nesting depth `K`, and the
returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no
scalar values, then `K` is one greater than the maximum depth of empty lists
in `pylist`. All scalar values in `pylist` must be compatible with `dtype`.
Args:
pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
is not a `list` or `tuple` must be a scalar value compatible with `dtype`.
dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`.
If not specified, then a default is chosen based on the scalar values in
`pylist`.
ragged_rank: An integer specifying the ragged rank of the returned
`RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to
`max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K
- 1 - len(inner_shape))` if `inner_shape` is specified.
inner_shape: A tuple of integers specifying the shape for individual inner
values in the returned `RaggedTensorValue`. Defaults to `()` if
`ragged_rank` is not specified. If `ragged_rank` is specified, then a
default is chosen based on the contents of `pylist`.
row_splits_dtype: data type for the constructed `RaggedTensorValue`'s
row_splits. One of `numpy.int32` or `numpy.int64`.
Returns:
A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified
`ragged_rank`, containing the values from `pylist`.
Raises:
ValueError: If the scalar values in `pylist` have inconsistent nesting
depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
"""
if dtype is not None and isinstance(dtype, dtypes.DType):
dtype = dtype.as_numpy_dtype
row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype
def _ragged_factory(values, row_splits):
row_splits = np.array(row_splits, dtype=row_splits_dtype)
return ragged_tensor_value.RaggedTensorValue(values, row_splits)
def _inner_factory(pylist, dtype, shape, name=None): # pylint: disable=unused-argument
return np.reshape(np.array(pylist, dtype=dtype), shape)
return _constant_value(_ragged_factory, _inner_factory, pylist, dtype,
ragged_rank, inner_shape)
def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank,
                    inner_shape):
  """Constructs a constant RaggedTensor or RaggedTensorValue.

  Shared implementation behind `constant()` (graph tensors) and
  `constant_value()` (numpy values); the two factory callables decide
  which kind of object is built.

  Args:
    ragged_factory: A factory function with the signature:
      `ragged_factory(values, row_splits)`
    inner_factory: A factory function with the signature: `inner_factory(pylist,
      dtype, shape, name)`
    pylist: A nested `list`, `tuple` or `np.ndarray`.
    dtype: Data type for returned value.
    ragged_rank: Ragged rank for returned value.
    inner_shape: Inner value shape for returned value.

  Returns:
    A value returned by `ragged_factory` or `inner_factory`.

  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  if ragged_tensor.is_ragged(pylist):
    raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.")
  # np.ndim builds an array, so we short-circuit lists and tuples.
  if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:
    # Scalar value: only valid with ragged_rank 0 and an empty inner_shape.
    if ragged_rank is not None and ragged_rank != 0:
      raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" %
                       (pylist, ragged_rank))
    if inner_shape is not None and inner_shape:
      raise ValueError(
          "Invalid pylist=%r: incompatible with dim(inner_shape)=%d" %
          (pylist, len(inner_shape)))
    return inner_factory(pylist, dtype, ())
  if ragged_rank is not None and ragged_rank < 0:
    raise ValueError(
        "Invalid ragged_rank=%r: must be nonnegative" % ragged_rank)

  # Find the depth of scalar values in `pylist`.
  scalar_depth, max_depth = _find_scalar_and_max_depth(pylist)
  if scalar_depth is not None:
    if max_depth > scalar_depth:
      raise ValueError("Invalid pylist=%r: empty list nesting is greater "
                       "than scalar value nesting" % pylist)

  # If both inner_shape and ragged_rank were specified, then check that
  # they are compatible with pylist.
  if inner_shape is not None and ragged_rank is not None:
    expected_depth = ragged_rank + len(inner_shape) + 1
    if ((scalar_depth is not None and expected_depth != scalar_depth) or
        (scalar_depth is None and expected_depth < max_depth)):
      raise ValueError(
          "Invalid pylist=%r: incompatible with ragged_rank=%d "
          "and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape)))

  # Check if the result is a `Tensor`: no ragged dimension requested or
  # implied, so delegate entirely to the dense factory.
  if (ragged_rank == 0 or
      (ragged_rank is None and
       ((max_depth < 2) or
        (inner_shape is not None and max_depth - len(inner_shape) < 2)))):
    return inner_factory(pylist, dtype, inner_shape)

  # Compute default value for inner_shape.
  if inner_shape is None:
    if ragged_rank is None:
      inner_shape = ()
    else:
      inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank)

  # Compute default value for ragged_rank.
  if ragged_rank is None:
    if scalar_depth is None:
      ragged_rank = max(1, max_depth - 1)
    else:
      ragged_rank = max(1, scalar_depth - 1 - len(inner_shape))

  # Build the splits for each ragged rank, and concatenate the inner values
  # into a single list.  Each pass of the outer loop peels off one ragged
  # dimension, recording cumulative row lengths as row_splits.
  nested_splits = []
  values = pylist
  for dim in range(ragged_rank):
    nested_splits.append([0])
    concatenated_values = []
    for row in values:
      nested_splits[dim].append(nested_splits[dim][-1] + len(row))
      concatenated_values.extend(row)
    values = concatenated_values
  values = inner_factory(
      values, dtype=dtype, shape=(len(values),) + inner_shape, name="values")
  # Re-wrap from the innermost ragged dimension outward.
  for row_splits in reversed(nested_splits):
    values = ragged_factory(values, row_splits)
  return values
def _find_scalar_and_max_depth(pylist):
"""Finds nesting depth of scalar values in pylist.
Args:
pylist: A nested python `list` or `tuple`.
Returns:
A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting
depth of scalar values in `pylist`, or `None` if `pylist` contains no
scalars. `max_depth` is the maximum depth of `pylist` (including
empty lists).
Raises:
ValueError: If pylist has inconsistent nesting depths for scalars.
"""
# Check if pylist is not scalar. np.ndim builds an array, so we
# short-circuit lists and tuples.
if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:
scalar_depth = None
max_depth = 1
for child in pylist:
child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child)
if child_scalar_depth is not None:
if scalar_depth is not None and scalar_depth != child_scalar_depth + 1:
raise ValueError("all scalar values must have the same nesting depth")
scalar_depth = child_scalar_depth + 1
max_depth = max(max_depth, child_max_depth + 1)
return (scalar_depth, max_depth)
return (0, 0)
def _default_inner_shape_for_pylist(pylist, ragged_rank):
"""Computes a default inner shape for the given python list."""
def get_inner_shape(item):
"""Returns the inner shape for a python list `item`."""
if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:
return ()
# Note that we need this check here in case `item` is not a Python list but
# fakes as being one (pylist). For a scenario of this, see test added in
# https://github.com/tensorflow/tensorflow/pull/48945
elif len(item) > 0: # pylint: disable=g-explicit-length-test
return (len(item),) + get_inner_shape(item[0])
return (0,)
def check_inner_shape(item, shape):
"""Checks that `item` has a consistent shape matching `shape`."""
is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0
if is_nested != bool(shape):
raise ValueError("inner values have inconsistent shape")
if is_nested:
if shape[0] != len(item):
raise ValueError("inner values have inconsistent shape")
for child in item:
check_inner_shape(child, shape[1:])
# Collapse the ragged layers to get the list of inner values.
flat_values = pylist
for dim in range(ragged_rank):
if not all(
isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values):
raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d "
"requires scalar value depth greater than %d" %
(dim + 1, ragged_rank, ragged_rank))
flat_values = sum((list(v) for v in flat_values), [])
# Compute the inner shape looking only at the leftmost elements; and then
# use check_inner_shape to verify that other elements have the same shape.
inner_shape = get_inner_shape(flat_values)
check_inner_shape(flat_values, inner_shape)
return inner_shape[1:]
@tf_export(v1=["ragged.placeholder"])
@dispatch.add_dispatch_support
def placeholder(dtype, ragged_rank, value_shape=None, name=None):
  """Creates a placeholder for a `tf.RaggedTensor` that will always be fed.

  **Important**: This ragged tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  @compatibility{eager} Placeholders are not compatible with eager execution.

  Args:
    dtype: The data type for the `RaggedTensor`.
    ragged_rank: The ragged rank for the `RaggedTensor`
    value_shape: The shape for individual flat values in the `RaggedTensor`.
    name: A name for the operation (optional).

  Returns:
    A `RaggedTensor` that may be used as a handle for feeding a value, but
    not evaluated directly.

  Raises:
    RuntimeError: if eager execution is enabled
  """
  if ragged_rank == 0:
    # No ragged dimensions: the result is an ordinary dense placeholder.
    return array_ops.placeholder(dtype, value_shape, name)

  with ops.name_scope(name, "RaggedPlaceholder", []):
    # The innermost placeholder holds the flat values.  Its leading dimension
    # is None because the total number of values depends on the fed data.
    flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)
    result = array_ops.placeholder(dtype, flat_shape, "flat_values")
    # Wrap one ragged layer per ragged dimension, innermost first; each
    # layer gets its own int64 row_splits placeholder.
    for i in reversed(range(ragged_rank)):
      row_splits = array_ops.placeholder(dtypes.int64, [None],
                                         "row_splits_%d" % i)
      # validate=False: no point checking splits for a tensor never evaluated.
      result = ragged_tensor.RaggedTensor.from_row_splits(result, row_splits,
                                                          validate=False)
    return result
| apache-2.0 |
divyang4481/photivo | scons-local-2.2.0/SCons/SConf.py | 7 | 39115 | """SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Set
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
    """Record the current special build type (e.g. 'clean' or 'help')."""
    global build_type
    build_type = type
# to be set, if we are in dry-run mode
dryrun = 0

AUTO = 0   # use SCons dependency scanning for up-to-date checks
FORCE = 1  # force all tests to be rebuilt
CACHE = 2  # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO

def SetCacheMode(mode):
    """Set the Configure cache mode. mode must be one of "auto", "force",
    or "cache"."""
    global cache_mode
    try:
        cache_mode = {"auto": AUTO, "force": FORCE, "cache": CACHE}[mode]
    except KeyError:
        raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
    """Set the progress display to use (called from SCons.Script)"""
    global progress_display
    progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
    """Called just before the building targets phase begins."""
    if not _ac_config_hs:
        return
    # Attach a builder that wraps each recorded config.h text in an
    # include guard (see _createConfigH), then schedule one build per file.
    guard_action = SCons.Action.Action(_createConfigH, _stringConfigH)
    env.Append(BUILDERS={'SConfigHBuilder':
                         SCons.Builder.Builder(action=guard_action)})
    for header in _ac_config_hs.keys():
        env.SConfigHBuilder(header, env.Value(_ac_config_hs[header]))
class SConfWarning(SCons.Warnings.Warning):
    # Warning category used for all Configure-context warnings in this module.
    pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
    # Base class for all errors raised by a Configure context.
    def __init__(self,msg):
        SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
    """Raised when a file or directory needs to be updated during a Configure
    process, but the user requested a dry-run"""
    def __init__(self, target):
        # A File node means a test would have to be rebuilt; anything else
        # means the configure directory itself would have to be created.
        if isinstance(target, SCons.Node.FS.File):
            msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
        else:
            msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
        SConfError.__init__(self, msg)
class ConfigureCacheError(SConfError):
    """Raised when a user explicitly requested the cache feature, but the test
    is run the first time."""
    def __init__(self,target):
        SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
    """
    Special build info for targets of configure tests. Additional members
    are result (did the builder succeed last time?) and string, which
    contains messages of the original build phase.
    """
    result = None # -> 0/None -> no error, != 0 error
    string = None # the stdout / stderr output when building the target

    def set_build_result(self, result, string):
        # Record the outcome of a test build so that later runs can replay
        # the cached result/output instead of rebuilding.
        self.result = result
        self.string = string
class Streamer(object):
    """
    'Sniffer' for a file-like writable object. Similar to the unix tool tee.
    """
    def __init__(self, orig):
        # orig: the wrapped stream (may be None to only record).  Everything
        # written is duplicated into the StringIO buffer below.
        self.orig = orig
        self.s = io.StringIO()

    def write(self, str):
        # NOTE: 'unicode' is Python-2 only; io.StringIO requires unicode
        # text there, so both copies are coerced explicitly.
        if self.orig:
            self.orig.write(unicode(str))
        self.s.write(unicode(str))

    def writelines(self, lines):
        # Mirror file.writelines(), but re-add the newline each line lost.
        for l in lines:
            self.write(unicode(l + '\n'))

    def getvalue(self):
        """
        Return everything written to orig since the Streamer was created.
        """
        return self.s.getvalue()

    def flush(self):
        if self.orig:
            self.orig.flush()
        self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
    """
    This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
    correctly and knows about the current cache_mode.
    """
    def display(self, message):
        # Write a progress message to the configure log file (if open).
        if sconf_global.logstream:
            sconf_global.logstream.write("scons: Configure: " + message + "\n")

    def display_cached_string(self, bi):
        """
        Logs the original builder messages, given the SConfBuildInfo instance
        bi.
        """
        if not isinstance(bi, SConfBuildInfo):
            SCons.Warnings.warn(SConfWarning,
              "The stored build information has an unexpected class: %s" % bi.__class__)
        else:
            self.display("The original builder output was:\n" +
                         (" |" + str(bi.string)).replace("\n", "\n |"))

    def failed(self):
        # check, if the reason was a ConfigureDryRunError or a
        # ConfigureCacheError and if yes, reraise the exception
        exc_type = self.exc_info()[0]
        if issubclass(exc_type, SConfError):
            raise
        elif issubclass(exc_type, SCons.Errors.BuildError):
            # we ignore Build Errors (occurs, when a test doesn't pass)
            # Clear the exception to prevent the contained traceback
            # to build a reference cycle.
            self.exc_clear()
        else:
            self.display('Caught exception while building "%s":\n' %
                         self.targets[0])
            try:
                excepthook = sys.excepthook
            except AttributeError:
                # Earlier versions of Python don't have sys.excepthook...
                def excepthook(type, value, tb):
                    traceback.print_tb(tb)
                    print type, value
            excepthook(*self.exc_info())
        return SCons.Taskmaster.Task.failed(self)

    def collect_node_states(self):
        # returns (is_up_to_date, cached_error, cachable)
        # where is_up_to_date is 1, if the node(s) are up_to_date
        # cached_error is 1, if the node(s) are up_to_date, but the
        # build will fail
        # cachable is 0, if some nodes are not in our cache
        # T enables the Trace() debug output below.
        T = 0
        changed = False
        cached_error = False
        cachable = True
        for t in self.targets:
            if T: Trace('%s' % (t))
            bi = t.get_stored_info().binfo
            if isinstance(bi, SConfBuildInfo):
                if T: Trace(': SConfBuildInfo')
                if cache_mode == CACHE:
                    # Forced-cache mode: trust the stored result unconditionally.
                    t.set_state(SCons.Node.up_to_date)
                    if T: Trace(': set_state(up_to-date)')
                else:
                    if T: Trace(': get_state() %s' % t.get_state())
                    if T: Trace(': changed() %s' % t.changed())
                    if (t.get_state() != SCons.Node.up_to_date and t.changed()):
                        changed = True
                    if T: Trace(': changed %s' % changed)
                    cached_error = cached_error or bi.result
            else:
                if T: Trace(': else')
                # the node hasn't been built in a SConf context or doesn't
                # exist
                cachable = False
                changed = ( t.get_state() != SCons.Node.up_to_date )
                if T: Trace(': changed %s' % changed)
        if T: Trace('\n')
        return (not changed, cached_error, cachable)

    def execute(self):
        # Targets without a builder (e.g. source files) need no work.
        if not self.targets[0].has_builder():
            return

        sconf = sconf_global

        is_up_to_date, cached_error, cachable = self.collect_node_states()

        if cache_mode == CACHE and not cachable:
            raise ConfigureCacheError(self.targets[0])
        elif cache_mode == FORCE:
            is_up_to_date = 0

        if cached_error and is_up_to_date:
            # Replay a cached *failure* without rebuilding.
            self.display("Building \"%s\" failed in a previous run and all "
                         "its sources are up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
            raise SCons.Errors.BuildError # will be 'caught' in self.failed
        elif is_up_to_date:
            # Replay a cached *success*.
            self.display("\"%s\" is up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
        elif dryrun:
            raise ConfigureDryRunError(self.targets[0])
        else:
            # note stdout and stderr are the same here
            s = sys.stdout = sys.stderr = Streamer(sys.stdout)
            try:
                env = self.targets[0].get_build_env()
                if cache_mode == FORCE:
                    # Set up the Decider() to force rebuilds by saying
                    # that every source has changed.  Note that we still
                    # call the environment's underlying source decider so
                    # that the correct .sconsign info will get calculated
                    # and keep the build state consistent.
                    def force_build(dependency, target, prev_ni,
                                    env_decider=env.decide_source):
                        env_decider(dependency, target, prev_ni)
                        return True
                    if env.decide_source.func_code is not force_build.func_code:
                        env.Decider(force_build)
                env['PSTDOUT'] = env['PSTDERR'] = s
                try:
                    sconf.cached = 0
                    self.targets[0].build()
                finally:
                    sys.stdout = sys.stderr = env['PSTDOUT'] = \
                                 env['PSTDERR'] = sconf.logstream
            except KeyboardInterrupt:
                raise
            except SystemExit:
                exc_value = sys.exc_info()[1]
                raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
            except Exception, e:
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(1, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
                raise e
            else:
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(0, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
class SConfBase(object):
    """This is simply a class to represent a configure context. After
    creating a SConf object, you can call any tests. After finished with your
    tests, be sure to call the Finish() method, which returns the modified
    environment.
    Some words about caching: In most cases, it is not necessary to cache
    Test results explicitly. Instead, we use the scons dependency checking
    mechanism. For example, if one wants to compile a test program
    (SConf.TryLink), the compiler is only called, if the program dependencies
    have changed. However, if the program could not be compiled in a former
    SConf run, we need to explicitly cache this error.
    """

    # NOTE(review): custom_tests uses a mutable default ({}); it is only
    # read, never mutated, so this is harmless here.
    def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
                 log_file='$CONFIGURELOG', config_h = None, _depth = 0):
        """Constructor. Pass additional tests in the custom_tests dictionary,
        e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
        defines a custom test.
        Note also the conf_dir and log_file arguments (you may want to
        build tests in the VariantDir, not in the SourceDir)
        """
        global SConfFS
        if not SConfFS:
            SConfFS = SCons.Node.FS.default_fs or \
                      SCons.Node.FS.FS(env.fs.pathTop)
        # Only one SConf context may be active at a time.
        if sconf_global is not None:
            raise SCons.Errors.UserError
        self.env = env
        if log_file is not None:
            log_file = SConfFS.File(env.subst(log_file))
        self.logfile = log_file
        self.logstream = None
        self.lastTarget = None
        # _depth: how many stack frames to skip when reporting the caller
        # in the log (see _startup).
        self.depth = _depth
        self.cached = 0 # will be set, if all test results are cached

        # add default tests
        default_tests = {
                 'CheckCC'            : CheckCC,
                 'CheckCXX'           : CheckCXX,
                 'CheckSHCC'          : CheckSHCC,
                 'CheckSHCXX'         : CheckSHCXX,
                 'CheckFunc'          : CheckFunc,
                 'CheckType'          : CheckType,
                 'CheckTypeSize'      : CheckTypeSize,
                 'CheckDeclaration'   : CheckDeclaration,
                 'CheckHeader'        : CheckHeader,
                 'CheckCHeader'       : CheckCHeader,
                 'CheckCXXHeader'     : CheckCXXHeader,
                 'CheckLib'           : CheckLib,
                 'CheckLibWithHeader' : CheckLibWithHeader,
               }
        self.AddTests(default_tests)
        self.AddTests(custom_tests)
        self.confdir = SConfFS.Dir(env.subst(conf_dir))
        if config_h is not None:
            config_h = SConfFS.File(config_h)
        self.config_h = config_h
        self._startup()

    def Finish(self):
        """Call this method after finished with your tests:
                env = sconf.Finish()
        """
        self._shutdown()
        return self.env

    def Define(self, name, value = None, comment = None):
        """
        Define a pre processor symbol name, with the optional given value in the
        current config header.

        If value is None (default), then #define name is written. If value is not
        none, then #define name value is written.

        comment is a string which will be put as a C comment in the
        header, to explain the meaning of the value (appropriate C comments /* and
        */ will be put automatically.)"""
        lines = []
        if comment:
            comment_str = "/* %s */" % comment
            lines.append(comment_str)

        if value is not None:
            define_str = "#define %s %s" % (name, value)
        else:
            define_str = "#define %s" % name
        lines.append(define_str)
        lines.append('')

        # Accumulated text is flushed to the config_h file in _shutdown().
        self.config_h_text = self.config_h_text + '\n'.join(lines)

    def BuildNodes(self, nodes):
        """
        Tries to build the given nodes immediately. Returns 1 on success,
        0 on error.
        """
        if self.logstream is not None:
            # override stdout / stderr to write in log file
            oldStdout = sys.stdout
            sys.stdout = self.logstream
            oldStderr = sys.stderr
            sys.stderr = self.logstream

        # the engine assumes the current path is the SConstruct directory ...
        old_fs_dir = SConfFS.getcwd()
        old_os_dir = os.getcwd()
        SConfFS.chdir(SConfFS.Top, change_os_dir=1)

        # Because we take responsibility here for writing out our
        # own .sconsign info (see SConfBuildTask.execute(), above),
        # we override the store_info() method with a null place-holder
        # so we really control how it gets written.
        for n in nodes:
            n.store_info = n.do_not_store_info

        ret = 1

        try:
            # ToDo: use user options for calc
            save_max_drift = SConfFS.get_max_drift()
            SConfFS.set_max_drift(0)
            tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
            # we don't want to build tests in parallel
            jobs = SCons.Job.Jobs(1, tm )
            jobs.run()
            for n in nodes:
                state = n.get_state()
                if (state != SCons.Node.executed and
                    state != SCons.Node.up_to_date):
                    # the node could not be built. we return 0 in this case
                    ret = 0
        finally:
            # Restore drift, working directories and streams no matter what.
            SConfFS.set_max_drift(save_max_drift)
            os.chdir(old_os_dir)
            SConfFS.chdir(old_fs_dir, change_os_dir=0)
            if self.logstream is not None:
                # restore stdout / stderr
                sys.stdout = oldStdout
                sys.stderr = oldStderr
        return ret

    def pspawn_wrapper(self, sh, escape, cmd, args, env):
        """Wrapper function for handling piped spawns.

        This looks to the calling interface (in Action.py) like a "normal"
        spawn, but associates the call with the PSPAWN variable from
        the construction environment and with the streams to which we
        want the output logged.  This gets slid into the construction
        environment as the SPAWN variable so Action.py doesn't have to
        know or care whether it's spawning a piped command or not.
        """
        return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)

    def TryBuild(self, builder, text = None, extension = ""):
        """Low level TryBuild implementation. Normally you don't need to
        call that - you can use TryCompile / TryLink / TryRun instead
        """
        global _ac_build_counter

        # Make sure we have a PSPAWN value, and save the current
        # SPAWN value.
        try:
            self.pspawn = self.env['PSPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
        try:
            save_spawn = self.env['SPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing SPAWN construction variable.')

        nodesToBeBuilt = []

        # Each test gets a unique conftest_<N> file name.
        f = "conftest_" + str(_ac_build_counter)
        pref = self.env.subst( builder.builder.prefix )
        suff = self.env.subst( builder.builder.suffix )
        target = self.confdir.File(pref + f + suff)

        try:
            # Slide our wrapper into the construction environment as
            # the SPAWN function.
            self.env['SPAWN'] = self.pspawn_wrapper
            sourcetext = self.env.Value(text)

            if text is not None:
                textFile = self.confdir.File(f + extension)
                textFileNode = self.env.SConfSourceBuilder(target=textFile,
                                                           source=sourcetext)
                nodesToBeBuilt.extend(textFileNode)
                source = textFileNode
            else:
                source = None

            nodes = builder(target = target, source = source)
            if not SCons.Util.is_List(nodes):
                nodes = [nodes]
            nodesToBeBuilt.extend(nodes)
            result = self.BuildNodes(nodesToBeBuilt)

        finally:
            self.env['SPAWN'] = save_spawn

        _ac_build_counter = _ac_build_counter + 1
        if result:
            self.lastTarget = nodes[0]
        else:
            self.lastTarget = None

        return result

    def TryAction(self, action, text = None, extension = ""):
        """Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>,
        Returns the status (0 : failed, 1 : ok) and the contents of the
        output file.
        """
        builder = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
        ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
        # The builder is temporary; remove it again right away.
        del self.env['BUILDERS']['SConfActionBuilder']
        if ok:
            outputStr = self.lastTarget.get_contents()
            return (1, outputStr)
        return (0, "")

    def TryCompile( self, text, extension):
        """Compiles the program given in text to an env.Object, using extension
        as file extension (e.g. '.c'). Returns 1, if compilation was
        successful, 0 otherwise. The target is saved in self.lastTarget (for
        further processing).
        """
        return self.TryBuild(self.env.Object, text, extension)

    def TryLink( self, text, extension ):
        """Compiles the program given in text to an executable env.Program,
        using extension as file extension (e.g. '.c'). Returns 1, if
        compilation was successful, 0 otherwise. The target is saved in
        self.lastTarget (for further processing).
        """
        return self.TryBuild(self.env.Program, text, extension )

    def TryRun(self, text, extension ):
        """Compiles and runs the program given in text, using extension
        as file extension (e.g. '.c'). Returns (1, outputStr) on success,
        (0, '') otherwise. The target (a file containing the program's stdout)
        is saved in self.lastTarget (for further processing).
        """
        ok = self.TryLink(text, extension)
        if( ok ):
            prog = self.lastTarget
            pname = prog.path
            # Run the compiled program and capture its stdout in a .out file.
            output = self.confdir.File(os.path.basename(pname)+'.out')
            node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
            ok = self.BuildNodes(node)
            if ok:
                outputStr = output.get_contents()
                return( 1, outputStr)
        return (0, "")

    class TestWrapper(object):
        """A wrapper around Tests (to ensure sanity)"""
        def __init__(self, test, sconf):
            self.test = test
            self.sconf = sconf
        def __call__(self, *args, **kw):
            if not self.sconf.active:
                raise SCons.Errors.UserError
            context = CheckContext(self.sconf)
            ret = self.test(context, *args, **kw)
            if self.sconf.config_h is not None:
                self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
            # Fallback message; only shown if the test forgot to call Result().
            context.Result("error: no result")
            return ret

    def AddTest(self, test_name, test_instance):
        """Adds test_class to this SConf instance. It can be called with
        self.test_name(...)"""
        setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))

    def AddTests(self, tests):
        """Adds all the tests given in the tests dictionary to this SConf
        instance
        """
        for name in tests.keys():
            self.AddTest(name, tests[name])

    def _createDir( self, node ):
        # Create the configure directory on disk (or raise in dry-run mode).
        dirName = str(node)
        if dryrun:
            if not os.path.isdir( dirName ):
                raise ConfigureDryRunError(dirName)
        else:
            if not os.path.isdir( dirName ):
                os.makedirs( dirName )
                node._exists = 1

    def _startup(self):
        """Private method. Set up logstream, and set the environment
        variables necessary for a piped build
        """
        global _ac_config_logs
        global sconf_global
        global SConfFS

        self.lastEnvFs = self.env.fs
        self.env.fs = SConfFS
        self._createDir(self.confdir)
        self.confdir.up().add_ignore( [self.confdir] )

        if self.logfile is not None and not dryrun:
            # truncate logfile, if SConf.Configure is called for the first time
            # in a build
            if self.logfile in _ac_config_logs:
                log_mode = "a"
            else:
                _ac_config_logs[self.logfile] = None
                log_mode = "w"
            fp = open(str(self.logfile), log_mode)
            self.logstream = SCons.Util.Unbuffered(fp)
            # logfile may stay in a build directory, so we tell
            # the build system not to override it with an eventually
            # existing file with the same name in the source directory
            self.logfile.dir.add_ignore( [self.logfile] )

            # Report the SConscript location that opened this Configure
            # context; self.depth skips the wrapper frames.
            tb = traceback.extract_stack()[-3-self.depth]
            old_fs_dir = SConfFS.getcwd()
            SConfFS.chdir(SConfFS.Top, change_os_dir=0)
            self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
                                 (tb[0], tb[1], str(self.confdir)) )
            SConfFS.chdir(old_fs_dir)
        else:
            self.logstream = None
        # we use a special builder to create source files from TEXT
        action = SCons.Action.Action(_createSource,
                                     _stringSource)
        sconfSrcBld = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
        self.config_h_text = _ac_config_hs.get(self.config_h, "")
        self.active = 1
        # only one SConf instance should be active at a time ...
        sconf_global = self

    def _shutdown(self):
        """Private method. Reset to non-piped spawn"""
        global sconf_global, _ac_config_hs

        if not self.active:
            raise SCons.Errors.UserError("Finish may be called only once!")
        if self.logstream is not None and not dryrun:
            self.logstream.write("\n")
            self.logstream.close()
            self.logstream = None
        # remove the SConfSourceBuilder from the environment
        blds = self.env['BUILDERS']
        del blds['SConfSourceBuilder']
        self.env.Replace( BUILDERS=blds )
        self.active = 0
        sconf_global = None
        if not self.config_h is None:
            _ac_config_hs[self.config_h] = self.config_h_text
        self.env.fs = self.lastEnvFs
class CheckContext(object):
    """Provides a context for configure tests. Defines how a test writes to the
    screen and log file.
    A typical test is just a callable with an instance of CheckContext as
    first argument:
        def CheckCustom(context, ...)
            context.Message('Checking my weird test ... ')
            ret = myWeirdTestFunction(...)
            context.Result(ret)
    Often, myWeirdTestFunction will be one of
    context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, for they are only rebuild, if the dependencies have
    changed.
    """

    def __init__(self, sconf):
        """Constructor. Pass the corresponding SConf instance."""
        self.sconf = sconf
        self.did_show_result = 0

        # for Conftest.py:
        self.vardict = {}
        self.havedict = {}
        self.headerfilename = None
        self.config_h = "" # config_h text will be stored here
        # we don't regenerate the config.h file after each test. That means,
        # that tests won't be able to include the config.h file, and so
        # they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
        # issue, though. If it turns out, that we need to include config.h
        # in tests, we must ensure, that the dependencies are worked out
        # correctly. Note that we can't use Conftest.py's support for config.h,
        # cause we will need to specify a builder for the config.h file ...

    def Message(self, text):
        """Inform about what we are doing right now, e.g.
        'Checking for SOMETHING ... '
        """
        self.Display(text)
        # Optimistically assume the result will come from cache; the build
        # task resets this flag if it really runs a builder.
        self.sconf.cached = 1
        self.did_show_result = 0

    def Result(self, res):
        """Inform about the result of the test. res may be an integer or a
        string. In case of an integer, the written text will be 'yes' or 'no'.
        The result is only displayed when self.did_show_result is not set.
        """
        if isinstance(res, (int, bool)):
            if res:
                text = "yes"
            else:
                text = "no"
        elif isinstance(res, str):
            text = res
        else:
            raise TypeError("Expected string, int or bool, got " + str(type(res)))

        if self.did_show_result == 0:
            # Didn't show result yet, do it now.
            self.Display(text + "\n")
            self.did_show_result = 1

    # The Try* methods simply delegate to the owning SConf context.
    def TryBuild(self, *args, **kw):
        return self.sconf.TryBuild(*args, **kw)

    def TryAction(self, *args, **kw):
        return self.sconf.TryAction(*args, **kw)

    def TryCompile(self, *args, **kw):
        return self.sconf.TryCompile(*args, **kw)

    def TryLink(self, *args, **kw):
        return self.sconf.TryLink(*args, **kw)

    def TryRun(self, *args, **kw):
        return self.sconf.TryRun(*args, **kw)

    def __getattr__( self, attr ):
        # Forward 'env' and 'lastTarget' to the owning SConf instance.
        if( attr == 'env' ):
            return self.sconf.env
        elif( attr == 'lastTarget' ):
            return self.sconf.lastTarget
        else:
            raise AttributeError("CheckContext instance has no attribute '%s'" % attr)

    #### Stuff used by Conftest.py (look there for explanations).
    # NOTE: these return 0/False on success (inverted), as Conftest expects.

    def BuildProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Program, text, ext)

    def CompileProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Object, text, ext)

    def CompileSharedObject(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.SharedObject, text, ext)

    def RunProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        st, out = self.TryRun(text, ext)
        return not st, out

    def AppendLIBS(self, lib_name_list):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Append(LIBS = lib_name_list)
        return oldLIBS

    def PrependLIBS(self, lib_name_list):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Prepend(LIBS = lib_name_list)
        return oldLIBS

    def SetLIBS(self, val):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Replace(LIBS = val)
        return oldLIBS

    def Display(self, msg):
        if self.sconf.cached:
            # We assume that Display is called twice for each test here
            # once for the Checking for ... message and once for the result.
            # The self.sconf.cached flag can only be set between those calls
            msg = "(cached) " + msg
            self.sconf.cached = 0
        progress_display(msg, append_newline=0)
        self.Log("scons: Configure: " + msg + "\n")

    def Log(self, msg):
        if self.sconf.logstream is not None:
            self.sconf.logstream.write(msg)

    #### End of stuff used by Conftest.py.
def SConf(*args, **kw):
    """Factory: return a real SConfBase, or a Null object when the current
    special build type (e.g. 'clean' or 'help') disables configuration."""
    if not kw.get(build_type, True):
        return SCons.Util.Null()
    kw['_depth'] = kw.get('_depth', 0) + 1
    # Strip the build-type switches before handing kw to SConfBase.
    for bt in build_types:
        kw.pop(bt, None)
    return SConfBase(*args, **kw)
def CheckFunc(context, function_name, header = None, language = None):
    """Configure test: can `function_name` be compiled and linked?"""
    failed = SCons.Conftest.CheckFunc(context, function_name, header=header,
                                      language=language)
    context.did_show_result = 1
    # Conftest returns an error indication; invert it for True-on-success.
    return not failed
def CheckType(context, type_name, includes = "", language = None):
    """Configure test: is the type `type_name` defined?"""
    failed = SCons.Conftest.CheckType(context, type_name,
                                      header=includes, language=language)
    context.did_show_result = 1
    return not failed
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
    """Configure test: determine sizeof(`type_name`).

    Unlike the other checks this returns the Conftest result directly
    (the size on success, 0 on failure) rather than a negated flag.
    """
    size = SCons.Conftest.CheckTypeSize(context, type_name,
                                        header=includes, language=language,
                                        expect=expect)
    context.did_show_result = 1
    return size
def CheckDeclaration(context, declaration, includes = "", language = None):
    """Configure test: is `declaration` declared by the given includes?"""
    failed = SCons.Conftest.CheckDeclaration(context, declaration,
                                             includes=includes,
                                             language=language)
    context.did_show_result = 1
    return not failed
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
    # Used by CheckHeader and CheckLibWithHeader to produce C '#include'
    # statements from the specified header (list).  When leaveLast is true,
    # the final header is returned separately instead of being included.
    if not SCons.Util.is_List(headers):
        headers = [headers]
    lastHeader = None
    if leaveLast:
        lastHeader = headers[-1]
        headers = headers[:-1]
    includes = ["#include %s%s%s\n"
                % (include_quotes[0], h, include_quotes[1])
                for h in headers]
    return ''.join(includes), lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
    """
    A test for a C or C++ header file.
    """
    # All but the last header become the program prefix; the last one is
    # the header actually being probed.
    prog_prefix, hdr_to_check = createIncludesFromHeaders(header, 1,
                                                          include_quotes)
    failed = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
                                        language=language,
                                        include_quotes=include_quotes)
    context.did_show_result = 1
    return not failed
def CheckCC(context):
    """Configure test: does the configured C compiler work?"""
    failed = SCons.Conftest.CheckCC(context)
    context.did_show_result = 1
    return not failed
def CheckCXX(context):
    """Configure test: does the configured C++ compiler work?"""
    failed = SCons.Conftest.CheckCXX(context)
    context.did_show_result = 1
    return not failed
def CheckSHCC(context):
    """Configure test: does the shared-object C compiler work?"""
    failed = SCons.Conftest.CheckSHCC(context)
    context.did_show_result = 1
    return not failed
def CheckSHCXX(context):
    """Configure test: does the shared-object C++ compiler work?"""
    failed = SCons.Conftest.CheckSHCXX(context)
    context.did_show_result = 1
    return not failed
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
    """
    A test for a C++ header file.  Thin convenience wrapper over CheckHeader.
    """
    return CheckHeader(context, header, include_quotes, language="C++")
def CheckLib(context, library = None, symbol = "main",
             header = None, language = None, autoadd = 1):
    """
    A test for a library. See also CheckLibWithHeader.
    Note that library may also be None to test whether the given symbol
    compiles without flags.
    """
    # Normalize `library` into a non-empty list; [None] probes bare linking.
    if library == []:
        library = [None]
    if not SCons.Util.is_List(library):
        library = [library]

    # ToDo: accept path for the library
    failed = SCons.Conftest.CheckLib(context, library, symbol, header=header,
                                     language=language, autoadd=autoadd)
    context.did_show_result = 1
    return not failed
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
                       call = None, autoadd = 1):
    # ToDo: accept path for library. Support system header files.
    """
    Another (more sophisticated) test for a library.
    Checks, if library and header is available for language (may be 'C'
    or 'CXX'). Call may be a valid expression _with_ a trailing ';'.
    As in CheckLib, we support library=None, to test if the call compiles
    without extra link flags.
    """
    # All given headers are included in the probe program (leaveLast=0).
    prog_prefix, dummy = createIncludesFromHeaders(header, 0)
    if libs == []:
        libs = [None]
    if not SCons.Util.is_List(libs):
        libs = [libs]

    failed = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
                                     call=call, language=language,
                                     autoadd=autoadd)
    context.did_show_result = 1
    return not failed
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
ar7z1/ansible | lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py | 45 | 8151 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations about an ESXi host
description:
- This module can be used to manage firewall configurations about an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": true,
"desired_state": true,
"previous_state": true
},
"vvold": {
"current_state": true,
"desired_state": true,
"previous_state": true
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareFirewallManager(PyVmomi):
    """Gather and reconcile ESXi firewall rule set state via the vSphere API."""
    def __init__(self, module):
        super(VmwareFirewallManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.options = self.params.get('options', dict())
        # All ESXi host objects the rules apply to (whole cluster or one host).
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        # {host name: {rule set key: {'enabled': bool}}}, filled below.
        self.firewall_facts = dict()
        self.rule_options = self.module.params.get("rules")
        self.gather_rule_set()
    def gather_rule_set(self):
        # Snapshot the current firewall rule set state of every host so
        # ensure() can diff desired vs. actual without extra API calls.
        for host in self.hosts:
            self.firewall_facts[host.name] = {}
            firewall_system = host.configManager.firewallSystem
            if firewall_system:
                for rule_set_obj in firewall_system.firewallInfo.ruleset:
                    temp_rule_dict = dict()
                    temp_rule_dict['enabled'] = rule_set_obj.enabled
                    self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict
    def ensure(self):
        """
        Function to ensure rule set configuration

        Validates each requested rule (name and enabled are required),
        enables/disables rule sets that differ from the desired state
        (skipping the API call in check mode), and exits the module with
        per-host, per-rule state in ``rule_set_state``.
        """
        fw_change_list = []
        results = dict(changed=False, rule_set_state=dict())
        for host in self.hosts:
            firewall_system = host.configManager.firewallSystem
            if firewall_system is None:
                # Host exposes no firewall system; nothing to manage.
                continue
            results['rule_set_state'][host.name] = dict()
            for rule_option in self.rule_options:
                rule_name = rule_option.get('name', None)
                if rule_name is None:
                    self.module.fail_json(msg="Please specify rule.name for rule set"
                                              " as it is required parameter.")
                if rule_name not in self.firewall_facts[host.name]:
                    self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
                rule_enabled = rule_option.get('enabled', None)
                if rule_enabled is None:
                    self.module.fail_json(msg="Please specify rules.enabled for rule set"
                                              " %s as it is required parameter." % rule_name)
                current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
                if current_rule_state != rule_enabled:
                    try:
                        # Only touch the host outside of check mode; the
                        # change is still recorded either way.
                        if not self.module.check_mode:
                            if rule_enabled:
                                firewall_system.EnableRuleset(id=rule_name)
                            else:
                                firewall_system.DisableRuleset(id=rule_name)
                        fw_change_list.append(True)
                    except vim.fault.NotFound as not_found:
                        self.module.fail_json(msg="Failed to enable rule set %s as"
                                                  " rule set id is unknown : %s" % (rule_name,
                                                                                    to_native(not_found.msg)))
                    except vim.fault.HostConfigFault as host_config_fault:
                        self.module.fail_json(msg="Failed to enabled rule set %s as an internal"
                                                  " error happened while reconfiguring"
                                                  " rule set : %s" % (rule_name,
                                                                      to_native(host_config_fault.msg)))
                # NOTE: current_state is reported as the desired value; in
                # check mode the host itself is left unmodified.
                results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
                                                                       previous_state=current_rule_state,
                                                                       desired_state=rule_enabled,
                                                                       )
        if any(fw_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
def main():
    """Module entry point: parse arguments and reconcile firewall rule sets."""
    spec = vmware_argument_spec()
    extra_options = dict(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        rules=dict(type='list', default=list(), required=False),
    )
    spec.update(extra_options)
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )
    # The manager connects during construction; ensure() exits the module.
    vmware_firewall_manager = VmwareFirewallManager(module)
    vmware_firewall_manager.ensure()
if __name__ == "__main__":
    main()
| gpl-3.0 |
kislayabhi/pgmpy | pgmpy/models/JunctionTree.py | 1 | 9137 | #!/usr/bin/env python3
from pgmpy.base import UndirectedGraph
from pgmpy.exceptions import CardinalityError
from collections import defaultdict
import numpy as np
class JunctionTree(UndirectedGraph):
    """
    Class for representing Junction Tree.
    Junction tree is undirected graph where each node represents a clique
    (list, tuple or set of nodes) and edges represent sepset between two cliques.
    Each sepset in G separates the variables strictly on one side of edge to
    other.
    Parameters
    ----------
    data: input graph
        Data to initialize graph. If data=None (default) an empty graph is
        created. The data is an edge list.
    Examples
    --------
    Create an empty JunctionTree with no nodes and no edges
    >>> from pgmpy.models import JunctionTree
    >>> G = JunctionTree()
    G can be grown by adding clique nodes.
    **Nodes:**
    Add a tuple (or list or set) of nodes as single clique node.
    >>> G.add_node(('a', 'b', 'c'))
    >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
    **Edges:**
    G can also be grown by adding edges.
    >>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))
    or a list of edges
    >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
    ...                   (('a', 'b', 'c'), ('a', 'c'))])
    """
    def __init__(self, ebunch=None):
        super(JunctionTree, self).__init__()
        if ebunch:
            self.add_edges_from(ebunch)
        # Clique potentials, in insertion order (one per clique node).
        self.factors = []
        # variable name -> cardinality; filled lazily by check_model().
        self.cardinalities = defaultdict(int)
    def add_node(self, node, **kwargs):
        """
        Add a single node to the junction tree.
        Parameters
        ----------
        node: node
            A node should be a collection of nodes forming a clique. It can be
            a list, set or tuple of nodes
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> G = JunctionTree()
        >>> G.add_node(('a', 'b', 'c'))
        """
        if not isinstance(node, (list, set, tuple)):
            raise TypeError('Node can only be a list, set or tuple of nodes'
                            'forming a clique')
        # Cliques are stored as tuples so that they are hashable graph nodes.
        node = tuple(node)
        super(JunctionTree, self).add_node(node, **kwargs)
    def add_nodes_from(self, nodes, **kwargs):
        """
        Add multiple nodes to the junction tree.
        Parameters
        ----------
        nodes: iterable container
            A container of nodes (list, dict, set, etc.).
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> G = JunctionTree()
        >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
        """
        for node in nodes:
            self.add_node(node, **kwargs)
    def add_edge(self, u, v, **kwargs):
        """
        Add an edge between two clique nodes.
        Parameters
        ----------
        u, v: nodes
            Nodes can be any list or set or tuple of nodes forming a clique.
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> G = JunctionTree()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        """
        set_u = set(u)
        set_v = set(v)
        # A junction-tree edge requires a non-empty sepset between cliques.
        if not set_u.intersection(set_v):
            raise ValueError('No sepset found between these two edges.')
        super(JunctionTree, self).add_edge(u, v)
    def add_factors(self, *factors):
        """
        Associate a factor to the graph.
        See factors class for the order of potential values
        Parameters
        ----------
        *factor: pgmpy.factors.factors object
            A factor object on any subset of the variables of the model which
            is to be associated with the model.
        Returns
        -------
        None
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> from pgmpy.factors import Factor
        >>> student = JunctionTree()
        >>> student.add_node(('Alice', 'Bob'))
        >>> factor = Factor(['Alice', 'Bob'], [3, 2], np.random.rand(6))
        >>> student.add_factors(factor)
        """
        for factor in factors:
            # Each factor's scope must coincide with one clique of the tree.
            factor_scope = set(factor.scope())
            nodes = [set(node) for node in self.nodes()]
            if factor_scope not in nodes:
                raise ValueError('Factors defined on clusters of variable not'
                                 'present in model')
            self.factors.append(factor)
    def get_factors(self, node=None):
        """
        Return the factors that have been added till now to the graph.
        If node is not None, it would return the factor corresponding to the
        given node.
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> from pgmpy.factors import Factor
        >>> G = JunctionTree()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_factors()
        >>> G.get_factors(node=('a', 'b', 'c'))
        """
        if node is None:
            return self.factors
        else:
            nodes = [set(node) for node in self.nodes()]
            if set(node) not in nodes:
                raise ValueError('Node not present in Junction Tree')
            # The first (and, per check_model, only) factor on this clique.
            factors = list(filter(lambda x: set(x.scope()) == set(node),
                                  self.factors))
            return factors[0]
    def remove_factors(self, *factors):
        """
        Removes the given factors from the added factors.
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> from pgmpy.factors import Factor
        >>> student = JunctionTree()
        >>> student.add_node(('Alice', 'Bob'))
        >>> factor = Factor(['Alice', 'Bob'], [2, 2], np.random.rand(4))
        >>> student.add_factors(factor)
        >>> student.remove_factors(factor)
        """
        for factor in factors:
            self.factors.remove(factor)
    def get_partition_function(self):
        """
        Returns the partition function for a given undirected graph.
        A partition function is defined as
        .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
        where m is the number of factors present in the graph
        and X are all the random variables present.
        Examples
        --------
        >>> from pgmpy.models import JunctionTree
        >>> from pgmpy.factors import Factor
        >>> G = JunctionTree()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_partition_function()
        """
        # check_model() also validates cardinality consistency as a side
        # effect before the product is taken.
        if self.check_model():
            factor = self.factors[0]
            factor = factor.product(*[self.factors[i] for i in
                                      range(1, len(self.factors))])
            return np.sum(factor.values)
    def check_model(self):
        """
        Check the model for various errors. This method checks for the following
        errors. In the same time also updates the cardinalities of all the random
        variables.
        * Checks if clique potentials are defined for all the cliques or not.
        * Check for running intersection property is not done explicitly over
        here as it done in the add_edges method.
        Returns
        -------
        check: boolean
            True if all the checks are passed
        """
        for clique in self.nodes():
            # get_factors raises ValueError if the clique is unknown; an
            # empty result here means the clique has no potential attached.
            if self.get_factors(clique):
                pass
            else:
                raise ValueError('Factors for all the cliques or clusters not'
                                 'defined.')
        if len(self.factors) != len(self.nodes()):
            raise ValueError('One to one mapping of factor to clique or cluster'
                             'is not there.')
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                # A variable must have the same cardinality in every factor.
                if ((self.cardinalities[variable]) and
                        (self.cardinalities[variable] != cardinality)):
                    raise CardinalityError(
                        'Cardinality of variable %s not matching among factors' % variable)
                else:
                    self.cardinalities[variable] = cardinality
        return True
| mit |
kdwink/intellij-community | python/helpers/coveragepy/coverage/bytecode.py | 209 | 2036 | """Bytecode manipulation for coverage.py"""
import opcode, types
from coverage.backward import byte_to_int
class ByteCode(object):
    """One decoded bytecode instruction.

    Every field starts at -1 ("unknown") and is filled in by `ByteCodes`:
    `offset`      -- position of this instruction in the code object.
    `op`          -- the opcode number (see the stdlib `opcode` module).
    `arg`         -- small-integer argument; its meaning depends on `op`.
    `next_offset` -- offset of the instruction that follows this one.
    `jump_to`     -- target offset for jump instructions (-1 if not a jump).
    """
    def __init__(self):
        self.offset = self.op = self.arg = -1
        self.next_offset = self.jump_to = -1
class ByteCodes(object):
    """Iterator over byte codes in `code`.

    Decodes the pre-3.6 variable-width instruction format: one opcode
    byte, followed by a two-byte little-endian argument when the opcode
    is >= opcode.HAVE_ARGUMENT.  Yields `ByteCode` objects.
    """
    # pylint: disable=R0924
    def __init__(self, code):
        self.code = code
    def __getitem__(self, i):
        # byte_to_int normalizes a single element of the bytecode string
        # (a str char on py2, an int on py3) to an int.
        return byte_to_int(self.code[i])
    def __iter__(self):
        pos = 0
        end = len(self.code)
        while pos < end:
            instr = ByteCode()
            instr.offset = pos
            instr.op = self[pos]
            following = pos + 1
            if instr.op >= opcode.HAVE_ARGUMENT:
                # Two-byte little-endian argument.
                instr.arg = self[pos + 1] + 256 * self[pos + 2]
                following += 2
            target = -1
            if instr.op in opcode.hasjrel:
                # Relative jumps are measured from the next instruction.
                target = following + instr.arg
            elif instr.op in opcode.hasjabs:
                target = instr.arg
            instr.jump_to = target
            instr.next_offset = pos = following
            yield instr
class CodeObjects(object):
    """Depth-first iterator over `code` and every code object nested in it."""
    def __init__(self, code):
        # Worklist of code objects still to be yielded.
        self.stack = [code]
    def __iter__(self):
        while self.stack:
            current = self.stack.pop()
            # Queue the children (nested functions, lambdas, comprehensions)
            # found among the constants before yielding the current object.
            self.stack.extend(
                const for const in current.co_consts
                if isinstance(const, types.CodeType)
            )
            yield current
| apache-2.0 |
ueshin/apache-spark | examples/src/main/python/ml/multilayer_perceptron_classification.py | 27 | 2133 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Build (or reuse) the SparkSession that drives this example.
    spark = SparkSession\
        .builder.appName("multilayer_perceptron_classification_example").getOrCreate()
    # $example on$
    # Load training data
    data = spark.read.format("libsvm")\
        .load("data/mllib/sample_multiclass_classification_data.txt")
    # Split the data into train and test (fixed seed for reproducibility)
    splits = data.randomSplit([0.6, 0.4], 1234)
    train = splits[0]
    test = splits[1]
    # specify layers for the neural network:
    # input layer of size 4 (features), two intermediate of size 5 and 4
    # and output of size 3 (classes)
    layers = [4, 5, 4, 3]
    # create the trainer and set its parameters
    trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128, seed=1234)
    # train the model
    model = trainer.fit(train)
    # compute accuracy on the test set
    result = model.transform(test)
    predictionAndLabels = result.select("prediction", "label")
    evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
    print("Test set accuracy = " + str(evaluator.evaluate(predictionAndLabels)))
    # $example off$
    spark.stop()
| apache-2.0 |
mttr/django | django/utils/baseconv.py | 650 | 2982 | # Copyright (c) 2010 Guilherme Gondim. All rights reserved.
# Copyright (c) 2009 Simon Willison. All rights reserved.
# Copyright (c) 2002 Drew Perttula. All rights reserved.
#
# License:
# Python Software Foundation License version 2
#
# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
# ALL WARRANTIES.
#
# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
# code so it may be used in proprietary projects just like prior ``baseconv``
# distributions.
#
# All trademarks referenced herein are property of their respective holders.
#
"""
Convert numbers from base 10 integers to base X strings and back again.
Sample usage::
>>> base20 = BaseConverter('0123456789abcdefghij')
>>> base20.encode(1234)
'31e'
>>> base20.decode('31e')
1234
>>> base20.encode(-1234)
'-31e'
>>> base20.decode('-31e')
-1234
      >>> base11 = BaseConverter('0123456789-', sign='$')
      >>> base11.encode(-1234)
      '$-22'
      >>> base11.decode('$-22')
      -1234
"""
# Digit alphabets for the ready-made converters below, ordered by value.
BASE2_ALPHABET = '01'
BASE16_ALPHABET = '0123456789ABCDEF'
# Base 56 omits easily-confused characters (0/O, 1/I/l, o).
BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# URL-safe base 64: base 62 plus '-' and '_'.
BASE64_ALPHABET = BASE62_ALPHABET + '-_'
class BaseConverter(object):
    """Convert integers between base 10 and an arbitrary digit alphabet.

    `digits` maps each value 0..base-1 to its character; `sign` is the
    character used to mark negative values in the encoded form ('-' is
    always the marker on the decimal side).
    """
    decimal_digits = '0123456789'
    def __init__(self, digits, sign='-'):
        self.sign = sign
        self.digits = digits
        # A sign character that is also a digit would make decoding ambiguous.
        if sign in self.digits:
            raise ValueError('Sign character found in converter base digits.')
    def __repr__(self):
        return "<BaseConverter: base{0} ({1})>".format(len(self.digits), self.digits)
    def encode(self, i):
        """Encode the (decimal) number `i` as a string in this base."""
        negative, value = self.convert(i, self.decimal_digits, self.digits, '-')
        return self.sign + value if negative else value
    def decode(self, s):
        """Decode the string `s` from this base back to a decimal int."""
        negative, value = self.convert(s, self.digits, self.decimal_digits, self.sign)
        return int('-' + value) if negative else int(value)
    def convert(self, number, from_digits, to_digits, sign):
        """Re-express `number` from one digit alphabet in another.

        Returns a (negative, digits) pair, where `negative` is 1 when
        `number` started with `sign` and `digits` is the unsigned result.
        """
        text = str(number)
        if text[0] == sign:
            text = text[1:]
            negative = 1
        else:
            negative = 0
        # Accumulate the absolute value as a plain integer ...
        value = 0
        for char in text:
            value = value * len(from_digits) + from_digits.index(char)
        # ... then re-emit it in the target alphabet, least significant
        # digit first, and reverse at the end.
        if value == 0:
            encoded = to_digits[0]
        else:
            chunks = []
            while value > 0:
                value, remainder = divmod(value, len(to_digits))
                chunks.append(to_digits[remainder])
            encoded = ''.join(reversed(chunks))
        return negative, encoded
# Ready-to-use converters for the common bases.
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
# '-' is itself a base 64 digit, so '$' marks negative numbers instead.
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
| bsd-3-clause |
NLeSC/embodied-emotions-scripts | embem/bodyparts/make_body_part_mapping.py | 1 | 2023 | """Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
import argparse
import os
def csv2mapping(file_name):
mapping = {}
with codecs.open(file_name, 'rb', 'utf-8') as f:
for line in f.readlines():
parts = line.split(';')
label = parts[0].lower()
if parts[2] != '':
if not mapping.get(label):
mapping[label] = []
for entry in parts[2:]:
if entry and entry != '\n':
words = entry.split('\t')
mapping[label].append(words[0])
return mapping
def merge_mappings(m1, m2):
for k, v in m2.iteritems():
if not m1.get(k):
m1[k] = v
else:
m1[k] = m1[k] + v
return m1
parser = argparse.ArgumentParser()
parser.add_argument('dir', help='directory containing the body part cluster '
'csv files (<embem_data_dir>/dict).')
parser.add_argument('json_out', help='name of file to write the mapping to '
'(json file).')
args = parser.parse_args()
dr = args.dir
mapping_r = csv2mapping(os.path.join(dr, 'body_part_clusters_renaissance.csv'))
mapping_c = csv2mapping(os.path.join(dr, 'body_part_clusters_classisism.csv'))
mapping_e = csv2mapping(os.path.join(dr,
'body_part_clusters_enlightenment.csv'))
mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)
for k, v in mapping.iteritems():
mapping[k] = list(set(mapping[k]))
with codecs.open(args.json_out, 'wb', 'utf-8') as f:
json.dump(mapping, f, indent=2)
| apache-2.0 |
epitron/youtube-dl | youtube_dl/extractor/polskieradio.py | 50 | 6668 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_unquote,
compat_urlparse
)
from ..utils import (
extract_attributes,
int_or_none,
strip_or_none,
unified_timestamp,
)
class PolskieRadioIE(InfoExtractor):
    """Extract the media attachments of a polskieradio.pl article page
    as a playlist (audio entries get vcodec 'none'; mp4 video is kept)."""
    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+/\d+/Artykul/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943,Prof-Andrzej-Nowak-o-historii-nie-da-sie-myslec-beznamietnie',
        'info_dict': {
            'id': '1587943',
            'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
            'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
        },
        'playlist': [{
            'md5': '2984ee6ce9046d91fc233bc1a864a09a',
            'info_dict': {
                'id': '1540576',
                'ext': 'mp3',
                'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
                'timestamp': 1456594200,
                'upload_date': '20160227',
                'duration': 2364,
                'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
            },
        }],
    }, {
        'url': 'http://www.polskieradio.pl/265/5217/Artykul/1635803,Euro-2016-nie-ma-miejsca-na-blad-Polacy-graja-ze-Szwajcaria-o-cwiercfinal',
        'info_dict': {
            'id': '1635803',
            'title': 'Euro 2016: nie ma miejsca na błąd. Polacy grają ze Szwajcarią o ćwierćfinał',
            'description': 'md5:01cb7d0cad58664095d72b51a1ebada2',
        },
        'playlist_mincount': 12,
    }, {
        'url': 'http://polskieradio.pl/9/305/Artykul/1632955,Bardzo-popularne-slowo-remis',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943',
        'only_matching': True,
    }, {
        # with mp4 video
        'url': 'http://www.polskieradio.pl/9/299/Artykul/1634903,Brexit-Leszek-Miller-swiat-sie-nie-zawali-Europa-bedzie-trwac-dalej',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # The article body; media players are embedded inside it.
        content = self._search_regex(
            r'(?s)<div[^>]+class="\s*this-article\s*"[^>]*>(.+?)<div[^>]+class="tags"[^>]*>',
            webpage, 'content')
        timestamp = unified_timestamp(self._html_search_regex(
            r'(?s)<span[^>]+id="datetime2"[^>]*>(.+?)</span>',
            webpage, 'timestamp', fatal=False))
        thumbnail_url = self._og_search_thumbnail(webpage)
        entries = []
        # Deduplicate players that reference the same media URL.
        media_urls = set()
        for data_media in re.findall(r'<[^>]+data-media=({[^>]+})', content):
            media = self._parse_json(data_media, playlist_id, fatal=False)
            if not media.get('file') or not media.get('desc'):
                continue
            media_url = self._proto_relative_url(media['file'], 'http:')
            if media_url in media_urls:
                continue
            media_urls.add(media_url)
            entries.append({
                'id': compat_str(media['id']),
                'url': media_url,
                'title': compat_urllib_parse_unquote(media['desc']),
                'duration': int_or_none(media.get('length')),
                # 'audio' provider entries carry no video stream.
                'vcodec': 'none' if media.get('provider') == 'audio' else None,
                'timestamp': timestamp,
                'thumbnail': thumbnail_url
            })
        title = self._og_search_title(webpage).strip()
        description = strip_or_none(self._og_search_description(webpage))
        return self.playlist_result(entries, playlist_id, title, description)
class PolskieRadioCategoryIE(InfoExtractor):
    """Playlist extractor for polskieradio.pl category/section pages;
    follows the "next" pagination link and delegates each article to
    PolskieRadioIE."""
    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+(?:,[^/]+)?/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.polskieradio.pl/7/5102,HISTORIA-ZYWA',
        'info_dict': {
            'id': '5102',
            'title': 'HISTORIA ŻYWA',
        },
        'playlist_mincount': 38,
    }, {
        'url': 'http://www.polskieradio.pl/7/4807',
        'info_dict': {
            'id': '4807',
            'title': 'Vademecum 1050. rocznicy Chrztu Polski'
        },
        'playlist_mincount': 5
    }, {
        'url': 'http://www.polskieradio.pl/7/129,Sygnaly-dnia?ref=source',
        'only_matching': True
    }, {
        'url': 'http://www.polskieradio.pl/37,RedakcjaKatolicka/4143,Kierunek-Krakow',
        'info_dict': {
            'id': '4143',
            'title': 'Kierunek Kraków',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/10,czworka/214,muzyka',
        'info_dict': {
            'id': '214',
            'title': 'Muzyka',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/7,Jedynka/5102,HISTORIA-ZYWA',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/8,Dwojka/196,Publicystyka',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Article URLs also match _VALID_URL; defer those to PolskieRadioIE.
        return False if PolskieRadioIE.suitable(url) else super(PolskieRadioCategoryIE, cls).suitable(url)
    def _entries(self, url, page, category_id):
        # Walk the category pages, yielding one url_result per article,
        # until no "next" pagination link is found.
        content = page
        for page_num in itertools.count(2):
            for a_entry, entry_id in re.findall(
                    r'(?s)<article[^>]+>.*?(<a[^>]+href=["\']/\d+/\d+/Artykul/(\d+)[^>]+>).*?</article>',
                    content):
                entry = extract_attributes(a_entry)
                href = entry.get('href')
                if not href:
                    continue
                yield self.url_result(
                    compat_urlparse.urljoin(url, href), PolskieRadioIE.ie_key(),
                    entry_id, entry.get('title'))
            mobj = re.search(
                r'<div[^>]+class=["\']next["\'][^>]*>\s*<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1',
                content)
            if not mobj:
                break
            next_url = compat_urlparse.urljoin(url, mobj.group('url'))
            content = self._download_webpage(
                next_url, category_id, 'Downloading page %s' % page_num)
    def _real_extract(self, url):
        category_id = self._match_id(url)
        webpage = self._download_webpage(url, category_id)
        title = self._html_search_regex(
            r'<title>([^<]+) - [^<]+ - [^<]+</title>',
            webpage, 'title', fatal=False)
        return self.playlist_result(
            self._entries(url, webpage, category_id),
            category_id, title)
| unlicense |
phenoxim/cinder | cinder/backup/drivers/tsm.py | 4 | 21172 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Backup driver for IBM Tivoli Storage Manager (TSM).
Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM)
as the backend. The driver uses TSM command line dsmc utility to
run the backup and restore operations.
This version supports backup of block devices, e.g, FC, iSCSI, local as well as
regular files.
A prerequisite for using the IBM TSM backup service is configuring the
Cinder host for using TSM.
"""
import json
import os
import stat
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
LOG = logging.getLogger(__name__)
# Driver configuration options (registered on the global CONF below).
tsm_opts = [
    cfg.StrOpt('backup_tsm_volume_prefix',
               default='backup',
               help='Volume prefix for the backup id when backing up to TSM'),
    cfg.StrOpt('backup_tsm_password',
               default='password',
               help='TSM password for the running username',
               secret=True),
    cfg.BoolOpt('backup_tsm_compression',
                default=True,
                help='Enable or Disable compression for backups'),
]
CONF = cfg.CONF
CONF.register_opts(tsm_opts)
# TSM backup modes this driver supports: raw block device ('image') or
# regular file ('file').
VALID_BACKUP_MODES = ['image', 'file']
def _get_backup_metadata(backup, operation):
    """Return metadata persisted with backup object.

    :param backup: backup object whose ``service_metadata`` holds a JSON
        dict with 'backup_path' and 'backup_mode' keys
    :param operation: operation name, used only for error reporting
    :raises: InvalidBackup if the stored mode is not in VALID_BACKUP_MODES
    :returns: tuple -- (backup_path, backup_mode)
    """
    try:
        svc_dict = json.loads(backup.service_metadata)
        backup_path = svc_dict.get('backup_path')
        backup_mode = svc_dict.get('backup_mode')
    except TypeError:
        # for backwards compatibility: older backups have no JSON
        # metadata (service_metadata is None), and were always image-mode
        # with the prefix-derived device path.
        vol_prefix = CONF.backup_tsm_volume_prefix
        backup_id = backup['id']
        backup_path = utils.make_dev_path('%s-%s' %
                                          (vol_prefix, backup_id))
        backup_mode = 'image'
    if backup_mode not in VALID_BACKUP_MODES:
        volume_id = backup['volume_id']
        backup_id = backup['id']
        err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
                 'Backup object has unexpected mode. Image or file '
                 'backups supported, actual mode is %(vol_mode)s.')
               % {'op': operation,
                  'bck_id': backup_id,
                  'vol_id': volume_id,
                  'vol_mode': backup_mode})
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    return backup_path, backup_mode
def _image_mode(backup_mode):
"""True if backup is image type."""
return backup_mode == 'image'
def _make_link(volume_path, backup_path, vol_id):
    """Create a hard link for the volume block device.

    The IBM TSM client performs an image backup on a block device.
    The name of the block device is the backup prefix plus the backup id

    :param volume_path: real device path name for volume
    :param backup_path: path name TSM will use as volume to backup
    :param vol_id: id of volume to backup (for reporting)
    :raises: InvalidBackup
    """
    try:
        # ln must run as root: device nodes are root-owned.
        utils.execute('ln', volume_path, backup_path,
                      run_as_root=True,
                      check_exit_code=True)
    except processutils.ProcessExecutionError as exc:
        err = (_('backup: %(vol_id)s failed to create device hardlink '
                 'from %(vpath)s to %(bpath)s.\n'
                 'stdout: %(out)s\n stderr: %(err)s')
               % {'vol_id': vol_id,
                  'vpath': volume_path,
                  'bpath': backup_path,
                  'out': exc.stdout,
                  'err': exc.stderr})
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode):
    """Create a consistent hardlink for the volume block device.

    Create a consistent hardlink using the backup id so TSM
    will be able to backup and restore to the same block device.

    :param backup_id: the backup id
    :param volume_path: real path of the backup/restore device
    :param volume_id: Volume id for backup or as restore target
    :param bckup_mode: TSM backup mode, either 'image' or 'file'
    :raises: InvalidBackup
    :returns: str -- hardlink path of the volume block device
    """
    if _image_mode(bckup_mode):
        # Block device: place the link under /dev using the backup prefix.
        hardlink_path = utils.make_dev_path('%s-%s' %
                                            (CONF.backup_tsm_volume_prefix,
                                             backup_id))
    else:
        # Regular file: keep the link next to the original file.
        dir, volname = os.path.split(volume_path)
        hardlink_path = ('%s/%s-%s' %
                         (dir,
                          CONF.backup_tsm_volume_prefix,
                          backup_id))
    _make_link(volume_path, hardlink_path, volume_id)
    return hardlink_path
def _check_dsmc_output(output, check_attrs, exact_match=True):
"""Check dsmc command line utility output.
Parse the output of the dsmc command and make sure that a given
attribute is present, and that it has the proper value.
TSM attribute has the format of "text : value".
:param output: TSM output to parse
:param check_attrs: text to identify in the output
:param exact_match: if True, the check will pass only if the parsed
value is equal to the value specified in check_attrs. If false, the
check will pass if the parsed value is greater than or equal to the
value specified in check_attrs. This is needed because for file
backups, the parent directories may also be included the first a
volume is backed up.
:returns: bool -- indicate if requited output attribute found in output
"""
parsed_attrs = {}
for line in output.split('\n'):
# parse TSM output: look for "msg : value
key, sep, val = line.partition(':')
if sep is not None and key is not None and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
for ckey, cval in check_attrs.items():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:
return False
elif not exact_match and int(parsed_attrs[ckey]) < int(cval):
return False
return True
def _get_volume_realpath(volume_file, volume_id):
    """Resolve the volume file object to a real path and a backup mode.

    Follows any symlinks behind the file object's name and classifies the
    target: a block device is backed up in 'image' mode, a regular file in
    'file' mode. Any other file type raises InvalidBackup.

    :param volume_file: file object representing the volume
    :param volume_id: Volume id for backup or as restore target
    :raises: InvalidBackup
    :returns: str -- real path of volume device
    :returns: str -- backup mode to be used
    """
    try:
        volume_path = os.path.realpath(volume_file.name)
        # Classify the target via its stat mode bits.
        volume_mode = os.stat(volume_path).st_mode
        if stat.S_ISBLK(volume_mode):
            backup_mode = 'image'
        elif stat.S_ISREG(volume_mode):
            backup_mode = 'file'
        else:
            msg = (_('backup: %(vol_id)s failed. '
                     '%(path)s is unexpected file type. Block or regular '
                     'files supported, actual file mode is %(vol_mode)s.')
                   % {'vol_id': volume_id,
                      'path': volume_path,
                      'vol_mode': volume_mode})
            LOG.error(msg)
            raise exception.InvalidBackup(reason=msg)
    except AttributeError:
        # volume_file has no usable .name attribute.
        msg = (_('backup: %(vol_id)s failed. Cannot obtain real path '
                 'to volume at %(path)s.')
               % {'vol_id': volume_id,
                  'path': volume_file})
        LOG.error(msg)
        raise exception.InvalidBackup(reason=msg)
    except OSError:
        # os.stat failed: the resolved path does not point at a file.
        msg = (_('backup: %(vol_id)s failed. '
                 '%(path)s is not a file.')
               % {'vol_id': volume_id,
                  'path': volume_path})
        LOG.error(msg)
        raise exception.InvalidBackup(reason=msg)
    return volume_path, backup_mode
def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
    """Delete the hardlink created for the volume block device.

    Cleanup is best effort: a failure to remove the link is logged but
    never raised to the caller.

    :param hardlink_path: hardlink to the volume block device
    :param volume_path: real path of the backup/restore device
    :param volume_id: Volume id for backup or as restore target
    """
    try:
        utils.execute('rm', '-f', hardlink_path, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        details = {'vol_id': volume_id,
                   'vpath': volume_path,
                   'bpath': hardlink_path,
                   'out': exc.stdout,
                   'err': exc.stderr}
        LOG.error('backup: %(vol_id)s failed to remove backup hardlink '
                  'from %(vpath)s to %(bpath)s.\n'
                  'stdout: %(out)s\n stderr: %(err)s.', details)
@interface.backupdriver
class TSMBackupDriver(driver.BackupDriver):
    """Provides backup, restore and delete of volumes backup for TSM.

    All operations shell out to the IBM TSM ``dsmc`` command-line client
    via ``utils.execute`` and determine success by parsing its output.
    """
    # Version string of this backup driver implementation.
    DRIVER_VERSION = '1.0.0'
    def __init__(self, context, db=None):
        """Initialize the driver with TSM settings from configuration."""
        super(TSMBackupDriver, self).__init__(context, db)
        self.tsm_password = CONF.backup_tsm_password
        # NOTE(review): nothing in this class reads self.volume_prefix;
        # the module-level helpers use CONF.backup_tsm_volume_prefix
        # directly. Confirm before removing.
        self.volume_prefix = CONF.backup_tsm_volume_prefix
    def check_for_setup_error(self):
        """Verify that required configuration options are set.

        :raises: InvalidConfigurationValue
        """
        # NOTE(review): 'backup_share' looks unrelated to the TSM (dsmc)
        # options used elsewhere in this driver -- confirm this flag list
        # is intentional and not copied from another backup driver.
        required_flags = ['backup_share']
        for flag in required_flags:
            val = getattr(CONF, flag, None)
            if not val:
                raise exception.InvalidConfigurationValue(option=flag,
                                                          value=val)
    def _do_backup(self, backup_path, vol_id, backup_mode):
        """Perform the actual backup operation.

        :param backup_path: volume path
        :param vol_id: volume id
        :param backup_mode: file mode of source volume; 'image' or 'file'
        :raises: InvalidBackup
        """
        backup_attrs = {'Total number of objects backed up': '1'}
        compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
        backup_cmd = ['dsmc', 'backup']
        if _image_mode(backup_mode):
            backup_cmd.append('image')
        backup_cmd.extend(['-quiet',
                           '-compression=%s' % compr_flag,
                           '-password=%s' % self.tsm_password,
                           backup_path])
        # Exit code is ignored; success is determined by parsing the
        # dsmc output below.
        out, err = utils.execute(*backup_cmd,
                                 run_as_root=True,
                                 check_exit_code=False)
        # exact_match=False: for file backups the first run may also count
        # parent directories, so >= 1 backed-up objects is acceptable.
        success = _check_dsmc_output(out, backup_attrs, exact_match=False)
        if not success:
            err = (_('backup: %(vol_id)s failed to obtain backup '
                     'success notification from server.\n'
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': vol_id,
                      'out': out,
                      'err': err})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
    def _do_restore(self, backup_path, restore_path, vol_id, backup_mode):
        """Perform the actual restore operation.

        :param backup_path: the path the backup was created from, this
        identifies the backup to tsm
        :param restore_path: volume path to restore into
        :param vol_id: volume id
        :param backup_mode: mode used to create the backup ('image' or 'file')
        :raises: InvalidBackup
        """
        restore_attrs = {'Total number of objects restored': '1'}
        restore_cmd = ['dsmc', 'restore']
        if _image_mode(backup_mode):
            restore_cmd.append('image')
            restore_cmd.append('-noprompt')  # suppress prompt
        else:
            restore_cmd.append('-replace=yes')  # suppress prompt
        restore_cmd.extend(['-quiet',
                            '-password=%s' % self.tsm_password,
                            backup_path])
        # When restoring to a different device than the one backed up,
        # dsmc needs the destination as an extra argument.
        if restore_path != backup_path:
            restore_cmd.append(restore_path)
        out, err = utils.execute(*restore_cmd,
                                 run_as_root=True,
                                 check_exit_code=False)
        success = _check_dsmc_output(out, restore_attrs)
        if not success:
            err = (_('restore: %(vol_id)s failed.\n'
                     'stdout: %(out)s\n stderr: %(err)s.')
                   % {'vol_id': vol_id,
                      'out': out,
                      'err': err})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
    def backup(self, backup, volume_file, backup_metadata=False):
        """Backup the given volume to TSM.

        TSM performs a backup of a volume. The volume_file is used
        to determine the path of the block device that TSM will back-up.

        :param backup: backup information for volume
        :param volume_file: file object representing the volume
        :param backup_metadata: whether or not to backup volume metadata
        :raises InvalidBackup:
        """
        # TODO(dosaboy): this needs implementing (see backup.drivers.ceph for
        # an example)
        if backup_metadata:
            msg = _("Volume metadata backup requested but this driver does "
                    "not yet support this feature.")
            raise exception.InvalidBackup(reason=msg)
        volume_path, backup_mode = _get_volume_realpath(volume_file,
                                                        backup.volume_id)
        LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
                  ' volume path: %(volume_path)s, mode: %(mode)s.',
                  {'volume_id': backup.volume_id,
                   'volume_path': volume_path,
                   'mode': backup_mode})
        backup_path = _create_unique_device_link(backup.id,
                                                 volume_path,
                                                 backup.volume_id,
                                                 backup_mode)
        # Persist mode and path so restore/delete can later identify the
        # backup object on the TSM server.
        service_metadata = {'backup_mode': backup_mode,
                            'backup_path': backup_path}
        backup.service_metadata = json.dumps(service_metadata)
        backup.save()
        try:
            self._do_backup(backup_path, backup.volume_id, backup_mode)
        except processutils.ProcessExecutionError as exc:
            err = (_('backup: %(vol_id)s failed to run dsmc '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': backup.volume_id,
                      'bpath': backup_path,
                      'out': exc.stdout,
                      'err': exc.stderr})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        # NOTE(review): exception.Error presumably has no stdout/stderr
        # attributes, so this handler (and the matching ones in restore()
        # and delete_backup()) would itself raise AttributeError if hit --
        # confirm against cinder.exception.
        except exception.Error as exc:
            err = (_('backup: %(vol_id)s failed to run dsmc '
                     'due to invalid arguments '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': backup.volume_id,
                      'bpath': backup_path,
                      'out': exc.stdout,
                      'err': exc.stderr})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        finally:
            # Always remove the hardlink, whether the backup succeeded or not.
            _cleanup_device_hardlink(backup_path, volume_path,
                                     backup.volume_id)
        LOG.debug('Backup %s finished.', backup.id)
    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from TSM server.

        :param backup: backup information for volume
        :param volume_id: volume id
        :param volume_file: file object representing the volume
        :raises: InvalidBackup
        """
        # backup_path is the path that was originally backed up.
        backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
        LOG.debug('Starting restore of backup from TSM '
                  'to volume %(volume_id)s, '
                  'backup: %(backup_id)s, '
                  'mode: %(mode)s.',
                  {'volume_id': volume_id,
                   'backup_id': backup.id,
                   'mode': backup_mode})
        # volume_path is the path to restore into. This may
        # be different than the original volume.
        volume_path, unused = _get_volume_realpath(volume_file,
                                                   volume_id)
        restore_path = _create_unique_device_link(backup.id,
                                                  volume_path,
                                                  volume_id,
                                                  backup_mode)
        try:
            self._do_restore(backup_path, restore_path, volume_id, backup_mode)
        except processutils.ProcessExecutionError as exc:
            err = (_('restore: %(vol_id)s failed to run dsmc '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': volume_id,
                      'bpath': restore_path,
                      'out': exc.stdout,
                      'err': exc.stderr})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        except exception.Error as exc:
            err = (_('restore: %(vol_id)s failed to run dsmc '
                     'due to invalid arguments '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': volume_id,
                      'bpath': restore_path,
                      'out': exc.stdout,
                      'err': exc.stderr})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        finally:
            _cleanup_device_hardlink(restore_path, volume_path, volume_id)
        LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
                  {'backup_id': backup.id,
                   'volume_id': volume_id})
    def delete_backup(self, backup):
        """Delete the given backup from TSM server.

        :param backup: backup information for volume
        :raises: InvalidBackup
        """
        delete_attrs = {'Total number of objects deleted': '1'}
        # NOTE(review): the 'restore' operation label is also used here for
        # a delete -- presumably only used for log/error wording inside
        # _get_backup_metadata; confirm.
        delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
        LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
                  {'backup': backup.id,
                   'mode': backup_mode})
        try:
            out, err = utils.execute('dsmc',
                                     'delete',
                                     'backup',
                                     '-quiet',
                                     '-noprompt',
                                     '-objtype=%s' % backup_mode,
                                     '-password=%s' % self.tsm_password,
                                     delete_path,
                                     run_as_root=True,
                                     check_exit_code=False)
        except processutils.ProcessExecutionError as exc:
            err = (_('delete: %(vol_id)s failed to run dsmc with '
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': backup.volume_id,
                      'out': exc.stdout,
                      'err': exc.stderr})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        except exception.Error as exc:
            err = (_('delete: %(vol_id)s failed to run dsmc '
                     'due to invalid arguments with '
                     'stdout: %(out)s\n stderr: %(err)s')
                   % {'vol_id': backup.volume_id,
                      'out': exc.stdout,
                      'err': exc.stderr})
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        success = _check_dsmc_output(out, delete_attrs)
        if not success:
            # log error if tsm cannot delete the backup object
            # but do not raise exception so that cinder backup
            # object can be removed.
            LOG.error('delete: %(vol_id)s failed with '
                      'stdout: %(out)s\n stderr: %(err)s',
                      {'vol_id': backup.volume_id,
                       'out': out,
                       'err': err})
        # NOTE(review): subscript access (backup['id']) is inconsistent with
        # the attribute access (backup.id) used above -- confirm the backup
        # object supports both.
        LOG.debug('Delete %s finished.', backup['id'])
def get_backup_driver(context):
    """Return a TSM backup driver instance for the given request context."""
    backup_driver = TSMBackupDriver(context)
    return backup_driver
| apache-2.0 |
googleapis/python-compute | google/cloud/compute_v1/services/zones/client.py | 1 | 20310 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.services.zones import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import ZonesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import ZonesRestTransport
class ZonesClientMeta(type):
    """Metaclass backing the Zones client.

    Maintains a registry of available transport implementations and
    exposes a class-level helper for selecting one, keeping this
    machinery off the client instances themselves.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[ZonesTransport]]
    _transport_registry["rest"] = ZonesRestTransport

    def get_transport_class(cls, label: str = None,) -> Type[ZonesTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if not label:
            # No explicit request: fall back to the first (default)
            # entry of the ordered registry.
            return next(iter(cls._transport_registry.values()))
        return cls._transport_registry[label]
class ZonesClient(metaclass=ZonesClientMeta):
    """The Zones API."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint
        # All groups after "name" are optional, so any hostname that starts
        # with a non-dot character matches and the groups capture whether a
        # ".mtls"/".sandbox"/".googleapis.com" component is already present.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        # NOTE(review): an endpoint beginning with '.' would make ``m`` None
        # and raise AttributeError here -- presumed unreachable for real
        # endpoints; confirm.
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "compute.googleapis.com"
    # __func__ unwraps the staticmethod object so it can be called while the
    # class body is still executing.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ZonesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ZonesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Backwards-compatible alias for from_service_account_file.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ZonesTransport:
        """Returns the transport used by the client instance.

        Returns:
            ZonesTransport: The transport used by the client
                instance.
        """
        return self._transport
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder,)
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization,)
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str,) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project,)
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, ZonesTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the zones client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ZonesTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # Client certificates are only honored when the environment opts in.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )
        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None
        # Figure out which api endpoint to use.
        # Explicit client_options.api_endpoint always wins; otherwise the
        # GOOGLE_API_USE_MTLS_ENDPOINT environment variable decides.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, ZonesTransport):
            # transport is a ZonesTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
    def get(
        self,
        request: compute.GetZoneRequest = None,
        *,
        project: str = None,
        zone: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> compute.Zone:
        r"""Returns the specified Zone resource. Gets a list of
        available zones by making a list() request.

        Args:
            request (google.cloud.compute_v1.types.GetZoneRequest):
                The request object. A request message for Zones.Get. See
                the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            zone (str):
                Name of the zone resource to return.
                This corresponds to the ``zone`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Zone:
                Represents a Zone resource.
                A zone is a deployment area. These deployment areas
                are subsets of a region. For example the zone
                us-east1-a is located in the us-east1 region. For
                more information, read Regions and Zones. (==
                resource_for {$api_version}.zones ==)
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, zone])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetZoneRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetZoneRequest):
            request = compute.GetZoneRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if zone is not None:
                request.zone = zone
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get]
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    def list(
        self,
        request: compute.ListZonesRequest = None,
        *,
        project: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListPager:
        r"""Retrieves the list of Zone resources available to the
        specified project.

        Args:
            request (google.cloud.compute_v1.types.ListZonesRequest):
                The request object. A request message for Zones.List.
                See the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.services.zones.pagers.ListPager:
                Contains a list of zone resources.
                Iterating over this object will yield
                results and resolve additional pages
                automatically.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a compute.ListZonesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.ListZonesRequest):
            request = compute.ListZonesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list]
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )
        # Done; return the response.
        return response
# Report the installed google-cloud-compute version in the user-agent
# metadata sent with API requests. Note this rebinds the
# DEFAULT_CLIENT_INFO imported from .transports.base above.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout):
    # fall back to a ClientInfo without a gapic version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ZonesClient",)
| apache-2.0 |
yelizariev/account-financial-tools | __unported__/account_tax_update/model/select_taxes.py | 44 | 3407 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>).
# This module copyright (C) 2013 Camptocamp (<http://www.camptocamp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class SelectTaxes(orm.TransientModel):
    _name = 'account.update.tax.select_taxes'
    _description = 'Select the taxes to be updated'
    # Arbitrary choice: transient wizard records never need a display name.
    _rec_name = 'type_tax_use'
    def save_taxes(self, cr, uid, ids, context=None):
        """
        Create tax lines in the update tax configuration
        based on a user selection of taxes.

        From these taxes, gather their hierarchically related
        other taxes which need to be duplicated to.
        From this gathering, ignore any taxes that might
        have been added by the user earlier on.
        """
        wiz = self.browse(cr, uid, ids[0], context=context)
        # unused tax_pool = self.pool.get('account.tax')
        line_pool = self.pool.get('account.update.tax.config.line')
        # Walk parent links up to the top of the tax hierarchy.
        def get_root_node(tax):
            if tax.parent_id:
                return get_root_node(tax.parent_id)
            return tax
        # Flatten a tax and all of its descendants into a single list.
        def add_tree(tax):
            result = [tax]
            if tax.child_ids:
                for child in tax.child_ids:
                    result += add_tree(child)
            return result
        # Ids of source taxes already present on the configuration
        # (added by the user earlier on) -- these must not be duplicated.
        covered = [x.source_tax_id.id for x in
                   (wiz.config_id.sale_line_ids +
                    wiz.config_id.purchase_line_ids)]
        taxes = []
        # Deduplicate the selection by root node, then expand each root
        # into its full subtree so related taxes are included.
        for tax in list(set(map(get_root_node, wiz.tax_ids))):
            taxes += add_tree(tax)
        # Create a config line per tax that is not yet covered, on either
        # the sale or purchase side depending on the wizard's tax use type.
        for tax in filter(lambda x: x.id not in covered, taxes):
            line_pool.create(
                cr, uid,
                {'%s_config_id' % wiz.type_tax_use: wiz.config_id.id,
                 'source_tax_id': tax.id,
                 },
                context=context)
        return {'type': 'ir.actions.act_window_close'}
    _columns = {
        # 'sale' or 'purchase'; also selects which config field to fill.
        'type_tax_use': fields.char(
            'Type tax use', size=16, readonly=True),
        'config_id': fields.many2one(
            'account.update.tax.config',
            'Configuration', readonly=True),
        # Taxes selected by the user in this wizard session.
        'tax_ids': fields.many2many(
            'account.tax', 'update_tax_select_account_tax_rel',
            'tax_select_id', 'tax_id',
            string='Taxes'),
        # Taxes already configured, shown for reference.
        'covered_tax_ids': fields.many2many(
            'account.tax', 'update_tax_select_covered_taxes_rel',
            'tax_select_id', 'tax_id',
            string='Covered taxes'),
    }
| agpl-3.0 |
Kalyzee/edx-platform | lms/djangoapps/courseware/tests/test_course_info.py | 49 | 4735 | """
Test the course_info xblock
"""
import mock
from nose.plugins.attrib import attr
from urllib import urlencode
from django.conf import settings
from django.core.urlresolvers import reverse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from util.date_utils import strftime_localized
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_CLOSED_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.models import CourseEnrollment
from .helpers import LoginEnrollmentTestCase
@attr('shard_1')
class CourseInfoTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the Course Info page
"""
def setUp(self):
super(CourseInfoTestCase, self).setUp()
self.course = CourseFactory.create()
self.page = ItemFactory.create(
category="course_info", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
def test_logged_in_unenrolled(self):
self.setup_user()
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn("You are not currently enrolled in this course", resp.content)
def test_logged_in_enrolled(self):
self.enroll(self.course)
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertNotIn("You are not currently enrolled in this course", resp.content)
def test_anonymous_user(self):
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn("OOGIE BLOOGIE", resp.content)
def test_logged_in_not_enrolled(self):
self.setup_user()
url = reverse('info', args=[self.course.id.to_deprecated_string()])
self.client.get(url)
# Check whether the user has been enrolled in the course.
# There was a bug in which users would be automatically enrolled
# with is_active=False (same as if they enrolled and immediately unenrolled).
# This verifies that the user doesn't have *any* enrollment record.
enrollment_exists = CourseEnrollment.objects.filter(
user=self.user, course_id=self.course.id
).exists()
self.assertFalse(enrollment_exists)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_non_live_course(self):
"""Ensure that a user accessing a non-live course sees a redirect to
the student dashboard, not a 404.
"""
self.setup_user()
self.enroll(self.course)
url = reverse('info', args=[unicode(self.course.id)])
response = self.client.get(url)
start_date = strftime_localized(self.course.start, 'SHORT_DATE')
self.assertRedirects(response, '{0}?{1}'.format(reverse('dashboard'), urlencode({'notlive': start_date})))
def test_nonexistent_course(self):
    """Requesting info for an unknown course key yields a 404."""
    self.setup_user()
    info_url = reverse('info', args=['not/a/course'])
    response = self.client.get(info_url)
    self.assertEqual(response.status_code, 404)
@attr('shard_1')
class CourseInfoTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    Tests for the Course Info page for an XML course
    """
    MODULESTORE = TEST_DATA_MIXED_CLOSED_MODULESTORE

    # The XML test course living at common/test/data/2014 is already closed;
    # these tests assert that a course info page still renders for it.
    xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')

    # This text appears in that course's info page
    # (common/test/data/2014/info/updates.html).
    xml_data = "course info 463139"

    @mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_logged_in_xml(self):
        """A logged-in user sees the XML course's updates content."""
        self.setup_user()
        info_url = reverse('info', args=[self.xml_course_key.to_deprecated_string()])
        response = self.client.get(info_url)
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.xml_data, response.content)

    @mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_anonymous_user_xml(self):
        """An anonymous visitor gets the page but not the updates content."""
        info_url = reverse('info', args=[self.xml_course_key.to_deprecated_string()])
        response = self.client.get(info_url)
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(self.xml_data, response.content)
| agpl-3.0 |
suncycheng/intellij-community | python/lib/Lib/site-packages/django/contrib/comments/views/moderation.py | 307 | 5037 | from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required, permission_required
from utils import next_redirect, confirmation_view
from django.contrib import comments
from django.contrib.comments import signals
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.

    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    if request.method == 'POST':
        # Record the flag, then redirect to `next` (or the "done" view).
        perform_flag(request, comment)
        return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)

    # GET: render the confirmation form.
    context = {'comment': comment, "next": next}
    return render_to_response('comments/flag.html', context,
                              template.RequestContext(request))
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
    """
    Deletes a comment. Confirmation on GET, action on POST. Requires the "can
    moderate comments" permission.

    Templates: `comments/delete.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    if request.method == 'POST':
        # Soft delete: the comment is flagged as deleted, not removed from
        # the database.
        perform_delete(request, comment)
        return next_redirect(request.POST.copy(), next, delete_done, c=comment.pk)

    # GET: render the confirmation form.
    context = {'comment': comment, "next": next}
    return render_to_response('comments/delete.html', context,
                              template.RequestContext(request))
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
    """
    Approve a comment (that is, mark it as public and non-removed). Confirmation
    on GET, action on POST. Requires the "can moderate comments" permission.

    Templates: `comments/approve.html`,
    Context:
        comment
            the `comments.comment` object for approval
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    if request.method == 'POST':
        # Mark the comment as approved and redirect onwards.
        perform_approve(request, comment)
        return next_redirect(request.POST.copy(), next, approve_done, c=comment.pk)

    # GET: render the confirmation form.
    context = {'comment': comment, "next": next}
    return render_to_response('comments/approve.html', context,
                              template.RequestContext(request))
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.
def perform_flag(request, comment):
    """
    Actually perform the flagging of a comment from a request.
    """
    flag_obj, was_created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.SUGGEST_REMOVAL,
    )
    # Notify listeners that a flag was (possibly newly) recorded.
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag_obj,
        created=was_created,
        request=request,
    )
def perform_delete(request, comment):
    """
    Soft-delete ``comment``: record a moderator-deletion flag, mark the
    comment as removed (it stays in the database), and emit the
    ``comment_was_flagged`` signal.
    """
    flag_obj, was_created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.MODERATOR_DELETION,
    )
    comment.is_removed = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag_obj,
        created=was_created,
        request=request,
    )
def perform_approve(request, comment):
    """
    Approve ``comment``: record a moderator-approval flag, mark the comment
    public and not removed, and emit the ``comment_was_flagged`` signal.
    """
    flag_obj, was_created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.MODERATOR_APPROVAL,
    )
    comment.is_removed = False
    comment.is_public = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag_obj,
        created=was_created,
        request=request,
    )
# Confirmation views, shown after a successful POST to the views above.

flag_done = confirmation_view(
    template="comments/flagged.html",
    doc='Displays a "comment was flagged" success page.',
)
delete_done = confirmation_view(
    template="comments/deleted.html",
    doc='Displays a "comment was deleted" success page.',
)
approve_done = confirmation_view(
    template="comments/approved.html",
    doc='Displays a "comment was approved" success page.',
)
| apache-2.0 |
jeyram/smtes | sites/all/libraries/fckeditor/editor/filemanager/connectors/py/fckconnector.py | 89 | 2686 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase(object):
    "The base connector class. Subclass it to extend functionality (see Zope example)"

    def __init__(self, environ=None):
        "Constructor: Here you should parse request fields, initialize variables, etc."
        # Wrap the raw request so commands can query fields uniformly.
        self.request = FCKeditorRequest(environ)
        # Response headers accumulated via setHeader().
        self.headers = []
        # Fall back to os.environ when not running under WSGI.
        self.environ = environ if environ else os.environ

    # local functions

    def setHeader(self, key, value):
        "Queue an HTTP response header as a (key, value) pair."
        self.headers.append((key, value))
        return
class FCKeditorRequest(object):
    "A wrapper around the request object"

    def __init__(self, environ):
        # Bug fix: always define get_request up-front. The original code
        # only assigned it inside the REQUEST_METHOD/QUERY_STRING branch
        # below, so a plain-CGI request lacking either variable left the
        # attribute undefined and has_key()/get() raised AttributeError.
        self.get_request = {}
        if environ:  # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else:  # plain old cgi
            self.environ = os.environ
            self.request = cgi.FieldStorage()

        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper() == 'POST':
                # We are in a POST, but a GET query string exists.
                # cgi parses POST data by default, so parse the GET
                # QUERY_STRING separately.
                self.get_request = cgi.FieldStorage(fp=None,
                                                    environ={
                                                        'REQUEST_METHOD': 'GET',
                                                        'QUERY_STRING': self.environ['QUERY_STRING'],
                                                    },
                                                    )

    def has_key(self, key):
        "Tell whether `key` is present among the POST or GET fields (Python 2 API)."
        return self.request.has_key(key) or self.get_request.has_key(key)

    def get(self, key, default=None):
        "Return the value for `key` (POST first, then GET), or `default`."
        if key in self.request.keys():
            field = self.request[key]
        elif key in self.get_request.keys():
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename:
            # File upload: do not convert, return the field object itself.
            return field
        else:
            return field.value
| gpl-2.0 |
tdliu/hoop-picks | lib/rsa/_compat.py | 81 | 3890 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python compatibility wrappers."""
from __future__ import absolute_import
import sys
from struct import pack
# Largest native integer. ``sys.maxsize`` exists on Python 2.6+/3;
# very old Python 2 only has ``sys.maxint``.
try:
    MAX_INT = sys.maxsize
except AttributeError:
    MAX_INT = sys.maxint

# Maximum values representable in 64/32/16 signed bits.
MAX_INT64 = (1 << 63) - 1
MAX_INT32 = (1 << 31) - 1
MAX_INT16 = (1 << 15) - 1

# Determine the word size of the processor.
if MAX_INT == MAX_INT64:
    # 64-bit processor.
    MACHINE_WORD_SIZE = 64
elif MAX_INT == MAX_INT32:
    # 32-bit processor.
    MACHINE_WORD_SIZE = 32
else:
    # Else we just assume 64-bit processor keeping up with modern times.
    MACHINE_WORD_SIZE = 64

# Name of the text type: ``unicode`` on Python 2, ``str`` on Python 3.
try:
    # < Python3
    unicode_type = unicode
except NameError:
    # Python3.
    unicode_type = str

# Fake byte literals.
# On Python 3 (where ``str`` is the unicode type) byte "literals" must be
# produced by encoding; on Python 2 a plain str already is bytes.
if str is unicode_type:
    def byte_literal(s):
        return s.encode('latin1')
else:
    def byte_literal(s):
        return s

# ``long`` is no more. Do type detection using this instead.
try:
    integer_types = (int, long)
except NameError:
    integer_types = (int,)

# Short alias used throughout the package.
b = byte_literal

# To avoid calling b() multiple times in tight loops.
ZERO_BYTE = b('\x00')
EMPTY_BYTE = b('')
def is_bytes(obj):
    """
    Tell whether ``obj`` is a byte string.

    :param obj:
        The value to test.
    :returns:
        ``True`` when ``obj`` is a byte string, ``False`` for anything else.
    """
    return isinstance(obj, bytes)
def is_integer(obj):
    """
    Tell whether ``obj`` is an integer (``int``, or ``long`` on Python 2).

    :param obj:
        The value to test.
    :returns:
        ``True`` when ``obj`` is an integer, ``False`` for anything else.
    """
    return isinstance(obj, integer_types)
def byte(num):
    """
    Pack ``num`` into a single base-256 byte.

    Use it as a replacement for ``chr`` wherever a byte is expected; unlike
    ``chr`` it behaves identically on every current Python version.

    :param num:
        An unsigned integer between 0 and 255 (both inclusive).
    :returns:
        A single byte.
    """
    return pack("B", num)
def get_word_alignment(num, force_arch=64,
                       _machine_word_size=MACHINE_WORD_SIZE):
    """
    Return word-alignment details for ``num`` on this platform.

    :param num:
        Unsigned integral number.
    :param force_arch:
        Set to anything other than 64 to prefer 32-bit chunks even on a
        64-bit machine. Default 64 will be used when on a 64-bit machine.
    :param _machine_word_size:
        (Internal) The machine word size used for alignment.
    :returns:
        4-tuple::

            (word_bits, word_bytes,
             max_uint, packing_format_type)
    """
    # Guard-clause chain, widest word first; the hex constants are the
    # unsigned maxima for 64/32/16/8 bits and the letters are struct codes.
    if force_arch == 64 and _machine_word_size >= 64 and num > 0xffffffff:
        # 64-bit unsigned integer.
        return 64, 8, 0xffffffffffffffff, "Q"
    if num > 0xffff:
        # 32-bit unsigned integer.
        return 32, 4, 0xffffffff, "L"
    if num > 0xff:
        # 16-bit unsigned integer.
        return 16, 2, 0xffff, "H"
    # 8-bit unsigned integer.
    return 8, 1, 0xff, "B"
| apache-2.0 |
lavvy/xbmc | tools/EventClients/examples/python/example_button2.py | 228 | 2074 | #!/usr/bin/python
# This is a simple example showing how you can send a key press event
# to XBMC in a non-queued fashion to achieve a button pressed down
# event i.e. a key press that repeats.
# The repeat interval is currently hard coded in XBMC but that might
# change in the future.
# NOTE: Read the comments in 'example_button1.py' for a more detailed
# explanation.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
    import time
    import sys

    # Address of the XBMC EventServer we talk to over UDP.
    host = "localhost"
    port = 9777
    addr = (host, port)
    sock = socket(AF_INET, SOCK_DGRAM)

    # The first packet must be HELO and can carry an icon.
    helo = PacketHELO("Example Remote", ICON_PNG,
                      "../../icons/bluetooth.png")
    helo.send(sock, addr)

    # Wait for the notification window to close (in XBMC).
    time.sleep(5)

    # Press "up" using the xbox gamepad map "XG" and button name "dpadup"
    # (see the PacketBUTTON doc for more details).
    press = PacketBUTTON(map_name="XG", button_name="dpadup")
    press.send(sock, addr)
    # Give it a few seconds so the repeat effect is visible.
    time.sleep(5)

    # Press "down" using the raw keyboard code.
    press = PacketBUTTON(code=0x28)
    press.send(sock, addr)
    time.sleep(5)

    # Press "right" using the keyboard map "KB" and button name "right".
    press = PacketBUTTON(map_name="KB", button_name="right")
    press.send(sock, addr)
    time.sleep(5)

    # Enough -- release the button. During release the button code
    # doesn't matter.
    release = PacketBUTTON(code=0x28, down=0)
    release.send(sock, addr)

    # Done: close the connection. Closing clears any repeating key, so
    # strictly speaking the explicit release above was not required.
    bye = PacketBYE()
    bye.send(sock, addr)
if __name__ == "__main__":
    main()
| gpl-2.0 |
yujikato/DIRAC | src/DIRAC/ResourceStatusSystem/Utilities/RSSCache.py | 2 | 5865 | """
:mod: RSSCache
Extension of DictCache to be used within RSS
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
import datetime
import threading
import time
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DictCache import DictCache
class RSSCache(object):
    '''
    DictCache wrapper with an integrated purge/refresh thread.

    Entries live for ``lifeTime`` seconds; a background thread repopulates
    the cache with ``updateFunc`` every ``lifeTime`` seconds and keeps a
    bounded history of refresh results.
    '''

    def __init__(self, lifeTime, updateFunc=None, cacheHistoryLifeTime=None):
        '''
        Constructor

        :param lifeTime: seconds a cached entry lives; also the refresh period
        :param updateFunc: callable returning S_OK( { key : value } ) used to
            repopulate the cache; refreshCache() fails with S_ERROR when unset
        :param cacheHistoryLifeTime: hours the refresh history is kept
            (falsy values fall back to 24, as before)
        '''
        self.__lifeTime = lifeTime
        # lifetime of the history on hours
        self.__cacheHistoryLifeTime = cacheHistoryLifeTime or 24
        self.__updateFunc = updateFunc

        # RSSCache
        self.__rssCache = DictCache()
        self.__rssCacheStatus = []  # list of ( updateTime, message ) tuples
        self.__rssCacheLock = threading.Lock()

        # Create purgeThread (daemon so it never blocks interpreter exit).
        self.__refreshStop = False
        self.__refreshThread = threading.Thread(target=self.__refreshCacheThreadRun)
        self.__refreshThread.daemon = True

    def startRefreshThread(self):
        '''
        Run refresh thread.
        '''
        self.__refreshThread.start()

    def stopRefreshThread(self):
        '''
        Stop refresh thread (takes effect after the current sleep period).
        '''
        self.__refreshStop = True

    def isCacheAlive(self):
        '''
        Returns status of the cache refreshing thread
        '''
        return S_OK(self.__refreshThread.is_alive())

    def setLifeTime(self, lifeTime):
        '''
        Set cache life time
        '''
        self.__lifeTime = lifeTime

    def setCacheHistoryLifeTime(self, cacheHistoryLifeTime):
        '''
        Set cache history life time
        '''
        self.__cacheHistoryLifeTime = cacheHistoryLifeTime

    def getCacheKeys(self):
        '''
        List all the keys stored in the cache.
        '''
        with self.__rssCacheLock:
            keys = self.__rssCache.getKeys()
        return S_OK(keys)

    def acquireLock(self):
        '''
        Acquires RSSCache lock
        '''
        self.__rssCacheLock.acquire()

    def releaseLock(self):
        '''
        Releases RSSCache lock
        '''
        self.__rssCacheLock.release()

    def getCacheStatus(self):
        '''
        Return the latest cache status
        '''
        with self.__rssCacheLock:
            if self.__rssCacheStatus:
                res = dict([self.__rssCacheStatus[0]])
            else:
                res = {}
        return S_OK(res)

    def getCacheHistory(self):
        '''
        Return the cache updates history
        '''
        with self.__rssCacheLock:
            res = dict(self.__rssCacheStatus)
        return S_OK(res)

    def get(self, resourceKey):
        '''
        Gets the resource(s) status(es). Every resource can have multiple statuses,
        so in order to speed up things, we store them on the cache as follows::

          { (<resourceName>,<resourceStatusType0>) : whatever0,
            (<resourceName>,<resourceStatusType1>) : whatever1,
          }

        NOTE: falsy cached values are treated as misses (original behaviour).
        '''
        with self.__rssCacheLock:
            resourceStatus = self.__rssCache.get(resourceKey)

        if resourceStatus:
            return S_OK({resourceKey: resourceStatus})
        return S_ERROR('Cannot get %s' % resourceKey)

    def getBulk(self, resourceKeys):
        '''
        Gets values for resourceKeys in one ATOMIC operation. Fails with
        S_ERROR on the first missing key.
        '''
        result = {}
        # Bug fix: the original implementation returned S_ERROR from inside
        # the locked region WITHOUT releasing the lock, deadlocking every
        # subsequent cache access. try/finally guarantees the release.
        self.__rssCacheLock.acquire()
        try:
            for resourceKey in resourceKeys:
                resourceRow = self.__rssCache.get(resourceKey)
                if not resourceRow:
                    return S_ERROR('Cannot get %s' % resourceKey)
                result.update({resourceKey: resourceRow})
        finally:
            self.__rssCacheLock.release()
        return S_OK(result)

    def resetCache(self):
        '''
        Reset cache.
        '''
        with self.__rssCacheLock:
            self.__rssCache.purgeAll()
        return S_OK()

    def refreshCache(self):
        '''
        Clears the cache and gets its latest version, not Thread safe !
        Acquire a lock before using it ! ( and release it afterwards ! )
        '''
        self.__rssCache.purgeAll()

        if self.__updateFunc is None:
            return S_ERROR('RSSCache has no updateFunction')
        newCache = self.__updateFunc()
        if not newCache['OK']:
            return newCache

        itemsAdded = self.__updateCache(newCache['Value'])
        return itemsAdded

    def refreshCacheAndHistory(self):
        '''
        Method that refreshes the cache and updates the history. Not thread safe,
        you must acquire a lock before using it, and release it right after !
        '''
        refreshResult = self.refreshCache()

        now = datetime.datetime.utcnow()

        if self.__rssCacheStatus:
            # Drop the oldest history record once it exceeds the history lifetime.
            dateInserted, _message = self.__rssCacheStatus[-1]
            if dateInserted < now - datetime.timedelta(hours=self.__cacheHistoryLifeTime):
                self.__rssCacheStatus.pop()

        self.__rssCacheStatus.insert(0, (now, refreshResult))

    ################################################################################
    # Private methods

    def __updateCache(self, newCache):
        '''
        The new cache must be a dictionary, which should look like::

          { ( <resourceName>,<resourceStatusType0>) : whatever0,
            ( <resourceName>,<resourceStatusType1>) : whatever1,
          }
        '''
        itemsCounter = 0

        for cacheKey, cacheValue in newCache.items():
            self.__rssCache.add(cacheKey, self.__lifeTime, value=cacheValue)
            itemsCounter += 1

        return S_OK(itemsCounter)

    def __refreshCacheThreadRun(self):
        '''
        Method that refreshes periodically the cache.
        '''
        while not self.__refreshStop:
            with self.__rssCacheLock:
                self.refreshCacheAndHistory()
            time.sleep(self.__lifeTime)

        self.__refreshStop = False
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
OpenOil-UG/aleph | aleph/analyze/regex_entity.py | 1 | 2596 | import re
import logging
from threading import RLock
from itertools import count
from collections import defaultdict
from sqlalchemy.orm import joinedload
from aleph.core import db
from aleph.text import normalize_strong
from aleph.model import Reference, Entity
from aleph.analyze.analyzer import Analyzer
log = logging.getLogger(__name__)
lock = RLock()
BATCH_SIZE = 1000
class EntityCache(object):
    """Index of normalized entity terms -> entity ids, plus batched regexes.

    Rebuilt lazily via generate(); the rebuild is skipped when no entity
    changed since the last run.
    """

    def __init__(self):
        self.latest = None   # timestamp of the entity set this cache reflects
        self.matches = {}    # normalized term -> set of entity ids
        self.regexes = []    # compiled alternation patterns, BATCH_SIZE terms each

    def generate(self):
        # Serialize rebuilds across threads.
        with lock:
            self._generate()

    def _generate(self):
        latest = Entity.latest()
        if self.latest is not None and self.latest >= latest:
            return
        self.latest = latest

        self.matches = defaultdict(set)
        q = Entity.all()
        q = q.options(joinedload('other_names'))
        q = q.filter(Entity.state == Entity.STATE_ACTIVE)
        for entity in q:
            for term in entity.regex_terms:
                normalized = normalize_strong(term)
                # Bug fix: normalize_strong() may yield None/empty for
                # degenerate terms; such keys used to crash len() in the
                # filter below.
                if normalized:
                    self.matches[normalized].add(entity.id)

        self.regexes = []
        terms = self.matches.keys()
        terms = [t for t in terms if len(t) > 2]
        for i in count(0):
            terms_slice = terms[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
            if not len(terms_slice):
                break
            # Bug fix: escape each term so regex metacharacters in entity
            # names (dots, parentheses, ...) cannot corrupt the pattern.
            # re.escape is a no-op for purely alphanumeric terms, so safe
            # terms match exactly as before.
            body = '|'.join(re.escape(t) for t in terms_slice)
            rex = re.compile('( |^)(%s)( |$)' % body)
            # rex = re.compile('(%s)' % body)
            self.regexes.append(rex)

        log.info('Generating entity tagger: %r (%s terms)',
                 latest, len(terms))
class RegexEntityAnalyzer(Analyzer):
    """Tag documents with known entities by regex-matching normalized text."""

    cache = EntityCache()
    origin = 'regex'

    def prepare(self):
        # Make sure the shared term index is current, then reset counters.
        self.cache.generate()
        self.entities = defaultdict(int)

    def on_text(self, text):
        # Count occurrences of known entity terms in one chunk of text.
        normalized = normalize_strong(text)
        if normalized is None or len(normalized) <= 2:
            return
        for rex in self.cache.regexes:
            for found in rex.finditer(normalized):
                term = found.group(2)
                for entity_id in self.cache.matches.get(term, []):
                    self.entities[entity_id] += 1

    def finalize(self):
        # Replace this document's regex-origin references with fresh counts.
        Reference.delete_document(self.document.id, origin=self.origin)
        for entity_id, weight in self.entities.items():
            ref = Reference()
            ref.document_id = self.document.id
            ref.entity_id = entity_id
            ref.origin = self.origin
            ref.weight = weight
            db.session.add(ref)
        # NOTE(review): "extraced" typo kept verbatim -- runtime log string.
        log.info('Regex extraced %s entities.', len(self.entities))
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.