repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
jeroanan/Aquarius | tests/TestAquarius.py | 1 | 4439 | import unittest
from unittest.mock import Mock
from aquarius.Aquarius import Aquarius
from aquarius.Harvester import Harvester
from aquarius.Interactor import Interactor
from aquarius.InteractorFactory import InteractorFactory
from aquarius.interactors.AddBookInteractor import AddBookInteractor
from aquarius.interactors.GetBookDetailsInteractor import GetBookDetailsInteractor
from aquarius.interactors.GetBookTypeInteractor import GetBookTypeInteractor
from aquarius.interactors.ListBooksByFirstLetterInteractor import ListBooksByFirstLetterInteractor
from aquarius.output.web.Web import Web
class TestAquarius(unittest.TestCase):
    """Unit tests for the Aquarius application facade.

    Every application operation is verified twice: once that the interactor
    factory is asked for the matching interactor, and once that the returned
    interactor is actually executed.  All interactors are mocked, so no real
    storage or web layer is touched.
    """

    def setUp(self):
        # Wire the app to a factory whose accessor methods return mocks.
        self.setup_interactors()
        self.__app = Aquarius("dummy", "whatever", self.__interactor_factory)
        self.__setup_harvester_mock()
        # NOTE(review): never read anywhere in this class — confirm whether a
        # callback-related test was removed or is still pending.
        self.__gotCallback = False

    def setup_interactors(self):
        # One mock per interactor type, plus factory accessors stubbed to
        # return them, so tests can assert on both the factory call and on
        # execute() of the interactor it handed out.
        self.__search_book_interactor = Mock(Interactor)
        self.__add_book_interactor = Mock(AddBookInteractor)
        self.__list_books_by_first_letter_interactor = Mock(ListBooksByFirstLetterInteractor)
        self.__get_book_details_interactor = Mock(GetBookDetailsInteractor)
        self.__get_book_type_interactor = Mock(GetBookTypeInteractor)
        self.__interactor_factory = InteractorFactory()
        self.__interactor_factory.get_search_book_interactor = Mock(return_value=self.__search_book_interactor)
        self.__interactor_factory.get_add_book_interactor = Mock(return_value=self.__add_book_interactor)
        self.__interactor_factory.get_list_books_by_first_letter_interactor = \
            Mock(return_value=self.__list_books_by_first_letter_interactor)
        self.__interactor_factory.get_book_details_interactor = Mock(return_value=self.__get_book_details_interactor)
        self.__interactor_factory.get_book_type_interactor = Mock(return_value=self.__get_book_type_interactor)

    def __setup_harvester_mock(self):
        # Real Harvester instance with a mocked do_harvest, so tests observe
        # whether harvesting was triggered without doing any actual work.
        self.__harvester = harvester = Harvester()
        harvester.do_harvest = Mock()
        self.__app.set_harvester(harvester)

    def test_search_books_uses_interactor_factory(self):
        self.__app.search_books("")
        self.assertTrue(self.__interactor_factory.get_search_book_interactor.called)

    def test_search_books_calls_interactor(self):
        self.__app.search_books("")
        self.assertTrue(self.__search_book_interactor.execute.called)

    def test_list_books_by_first_letter_uses_interactor_factory(self):
        self.__app.list_books_by_first_letter("b")
        self.assertTrue(self.__interactor_factory.get_list_books_by_first_letter_interactor.called)

    def test_list_books_by_first_letter_calls_interactor(self):
        self.__app.list_books_by_first_letter("b")
        self.assertTrue(self.__list_books_by_first_letter_interactor.execute.called)

    def test_get_book_details_uses_interactor_factory(self):
        self.__app.get_book_details(0)
        self.assertTrue(self.__interactor_factory.get_book_details_interactor.called)

    def test_get_book_details_calls_interactor(self):
        self.__app.get_book_details(0)
        self.assertTrue(self.__get_book_details_interactor.execute.called)

    def test_get_book_type_uses_interactor_factory(self):
        self.__app.get_book_type("EPUB")
        self.assertTrue(self.__interactor_factory.get_book_type_interactor.called)

    def test_get_book_type_calls_interactor(self):
        self.__app.get_book_type("EPUB")
        self.assertTrue(self.__get_book_type_interactor.execute.called)

    def test_add_book_uses_interactor_factory(self):
        self.__app.add_book(None)
        self.assertTrue(self.__interactor_factory.get_add_book_interactor.called)

    def test_add_book_calls_interactor(self):
        self.__app.add_book(None)
        self.assertTrue(self.__add_book_interactor.execute.called)

    def test_call_main(self):
        # main() should delegate to the configured output's main().
        output = Web(self.__app, None)
        output.main = Mock()
        self.__app.set_output(output)
        self.__app.main()
        self.assertTrue(output.main.called)

    def test_calling_harvest_books_calls_harvester(self):
        self.__app.harvest_books()
        self.assertTrue(self.__harvester.do_harvest.called)

    def test_calling_harvest_books_does_not_call_harvester_when_is_harvesting_set(self):
        # The is_harvesting flag acts as a re-entrancy guard.
        self.__app.is_harvesting = True
        self.__app.harvest_books()
        self.assertFalse(self.__harvester.do_harvest.called)
jimsimon/sky_engine | build/gyp_environment.py | 101 | 1320 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Sets up various automatic gyp environment variables. These are used by
gyp_chromium and landmines.py which run at different stages of runhooks. To
make sure settings are consistent between them, all setup should happen here.
"""
import gyp_helper
import os
import sys
import vs_toolchain
def SetEnvironment():
  """Sets defaults for GYP_* variables."""
  gyp_helper.apply_chromium_gyp_env()

  # Default the generator to ninja where appropriate, but never override an
  # explicit choice: -f/--format takes precedence over the env var, and
  # chromium.gyp_env has already been folded into os.environ at this point,
  # so an unset GYP_GENERATORS here really means "nobody chose one".
  if not os.environ.get('GYP_GENERATORS'):
    platform = sys.platform
    if platform.startswith(('linux', 'win', 'freebsd')):
      os.environ['GYP_GENERATORS'] = 'ninja'
    elif platform == 'darwin' and 'OS=ios' not in os.environ.get('GYP_DEFINES', []):
      # On mac, default to ninja too — but not when building chrome/ios.
      os.environ['GYP_GENERATORS'] = 'ninja'

  vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
| bsd-3-clause |
paulrouget/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/tests/test_wpttest.py | 6 | 7222 | import pytest
import sys
from io import BytesIO
from mock import Mock
from manifest import manifest as wptmanifest
from manifest.item import TestharnessTest
from .. import manifestexpected, wpttest
dir_ini_0 = """\
prefs: [a:b]
"""
dir_ini_1 = """\
prefs: [@Reset, b:c]
max-asserts: 2
min-asserts: 1
tags: [b, c]
"""
dir_ini_2 = """\
lsan-max-stack-depth: 42
"""
test_0 = """\
[0.html]
prefs: [c:d]
max-asserts: 3
tags: [a, @Reset]
"""
test_1 = """\
[1.html]
prefs:
if os == 'win': [a:b, c:d]
expected:
if os == 'win': FAIL
"""
test_2 = """\
[2.html]
lsan-max-stack-depth: 42
"""
test_3 = """\
[3.html]
[subtest1]
expected: [PASS, FAIL]
[subtest2]
disabled: reason
[subtest3]
expected: FAIL
"""
test_4 = """\
[4.html]
expected: FAIL
"""
test_5 = """\
[5.html]
"""
test_6 = """\
[6.html]
expected: [OK, FAIL]
"""
test_fuzzy = """\
[fuzzy.html]
fuzzy: fuzzy-ref.html:1;200
"""
testharness_test = """<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>"""
def make_mock_manifest(*items):
    """Build a Mock standing in for a wptmanifest Manifest.

    Each ``(test_type, dir_path, num_tests)`` triple expands into that many
    entries named ``<dir_path>/<i>.html``, each carrying a single
    TestharnessTest item.  Iteration and indexing on the mock are wired to
    the generated entry list; ``tests_root`` is a fixed dummy path.
    """
    entries = []
    for test_type, dir_path, num_tests in items:
        for index in range(num_tests):
            url = dir_path + "/%i.html" % index
            entries.append((test_type,
                            url,
                            {TestharnessTest("/foo.bar", url, "/", url)}))
    mock_manifest = Mock(tests_root="/foobar")
    mock_manifest.__iter__ = lambda self: iter(entries)
    mock_manifest.__getitem__ = lambda self, k: entries[k]
    return mock_manifest
def make_test_object(test_name,
                     test_path,
                     index,
                     items,
                     inherit_metadata=None,
                     iterate=False,
                     condition=None):
    """Compile expectation metadata and build the wpttest object under test.

    ``test_name`` is actually the expectation-file text to compile (one of
    the ``test_*`` fixture strings), ``items`` either a single mock-manifest
    triple or a list of them, and ``index`` selects which generated manifest
    entry to wrap.  ``condition`` supplies run-info properties used to
    resolve conditional expectations; ``inherit_metadata`` is a list of
    compiled directory-level manifests.
    """
    inherit_metadata = inherit_metadata if inherit_metadata is not None else []
    condition = condition if condition is not None else {}
    tests = make_mock_manifest(*items) if isinstance(items, list) else make_mock_manifest(items)
    test_metadata = manifestexpected.static.compile(BytesIO(test_name),
                                                    condition,
                                                    data_cls_getter=manifestexpected.data_cls_getter,
                                                    test_path=test_path,
                                                    url_base="/")
    # pop() consumes the one-element item set; next(iter(...)) leaves it
    # intact for callers that look the entry up again.
    test = next(iter(tests[index][2])) if iterate else tests[index][2].pop()
    return wpttest.from_manifest(tests, test, inherit_metadata, test_metadata.get_test(test.id))
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_metadata_inherit():
    # Directory metadata (dir_ini_0 then dir_ini_1) layers under the per-test
    # section; @Reset entries in prefs/tags clear previously inherited values.
    items = [("test", "a", 10), ("test", "a/b", 10), ("test", "c", 10)]
    inherit_metadata = [
        manifestexpected.static.compile(
            BytesIO(item),
            {},
            data_cls_getter=lambda x,y: manifestexpected.DirectoryManifest)
        for item in [dir_ini_0, dir_ini_1]]
    test_obj = make_test_object(test_0, "a/0.html", 0, items, inherit_metadata, True)
    assert test_obj.max_assertion_count == 3
    assert test_obj.min_assertion_count == 1
    assert test_obj.prefs == {"b": "c", "c": "d"}
    assert test_obj.tags == {"a", "dir:a"}


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_conditional():
    # The {"os": "win"} run-info selects the conditional prefs/expected
    # branches declared in test_1.
    items = [("test", "a", 10), ("test", "a/b", 10), ("test", "c", 10)]
    test_obj = make_test_object(test_1, "a/1.html", 1, items, None, True, {"os": "win"})
    assert test_obj.prefs == {"a": "b", "c": "d"}
    assert test_obj.expected() == "FAIL"


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_metadata_lsan_stack_depth():
    items = [("test", "a", 10), ("test", "a/b", 10)]
    # Declared directly on the matching test section (2.html) ...
    test_obj = make_test_object(test_2, "a/2.html", 2, items, None, True)
    assert test_obj.lsan_max_stack_depth == 42
    # ... absent when the metadata section doesn't match the test ...
    test_obj = make_test_object(test_2, "a/2.html", 1, items, None, True)
    assert test_obj.lsan_max_stack_depth is None
    inherit_metadata = [
        manifestexpected.static.compile(
            BytesIO(dir_ini_2),
            {},
            data_cls_getter=lambda x,y: manifestexpected.DirectoryManifest)
    ]
    # ... and inherited from directory-level metadata (dir_ini_2).
    # NOTE(review): "a/0/html" looks like a typo for "a/0.html" — confirm.
    test_obj = make_test_object(test_0, "a/0/html", 0, items, inherit_metadata, False)
    assert test_obj.lsan_max_stack_depth == 42
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_subtests():
    # Per-subtest expectations: first value is the expectation, remaining
    # list entries become known intermittent statuses.
    test_obj = make_test_object(test_3, "a/3.html", 3, ("test", "a", 4), None, False)
    assert test_obj.expected("subtest1") == "PASS"
    assert test_obj.known_intermittent("subtest1") == ["FAIL"]
    assert test_obj.expected("subtest2") == "PASS"
    assert test_obj.known_intermittent("subtest2") == []
    assert test_obj.expected("subtest3") == "FAIL"
    assert test_obj.known_intermittent("subtest3") == []


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_expected_fail():
    test_obj = make_test_object(test_4, "a/4.html", 4, ("test", "a", 5), None, False)
    assert test_obj.expected() == "FAIL"
    assert test_obj.known_intermittent() == []


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_no_expected():
    # With no expectation in the metadata, a harness test defaults to OK.
    test_obj = make_test_object(test_5, "a/5.html", 5, ("test", "a", 6), None, False)
    assert test_obj.expected() == "OK"
    assert test_obj.known_intermittent() == []


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_known_intermittent():
    test_obj = make_test_object(test_6, "a/6.html", 6, ("test", "a", 7), None, False)
    assert test_obj.expected() == "OK"
    assert test_obj.known_intermittent() == ["FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="bytes/text confusion in py3")
def test_metadata_fuzzy():
    """Fuzzy-reftest metadata: the manifest supplies the base fuzzy ranges
    while the expectation file supplies a per-reference override."""
    manifest_data = {
        "items": {"reftest": {"a/fuzzy.html": [["a/fuzzy.html",
                                                [["/a/fuzzy-ref.html", "=="]],
                                                {"fuzzy": [[["/a/fuzzy.html", '/a/fuzzy-ref.html', '=='],
                                                            [[2, 3], [10, 15]]]]}]]}},
        "paths": {"a/fuzzy.html": ["0"*40, "reftest"]},
        "version": 6,
        "url_base": "/"}
    manifest = wptmanifest.Manifest.from_json(".", manifest_data)
    test_metadata = manifestexpected.static.compile(BytesIO(test_fuzzy),
                                                    {},
                                                    data_cls_getter=manifestexpected.data_cls_getter,
                                                    test_path="a/fuzzy.html",
                                                    url_base="/")
    # Use the next() builtin rather than the Python-2-only .next() method so
    # this line does not raise AttributeError under Python 3.
    test = next(manifest.iterpath("a/fuzzy.html"))
    test_obj = wpttest.from_manifest(manifest, test, [], test_metadata.get_test(test.id))
    assert test_obj.fuzzy == {('/a/fuzzy.html', '/a/fuzzy-ref.html', '=='): [[2, 3], [10, 15]]}
    assert test_obj.fuzzy_override == {'/a/fuzzy-ref.html': ((1, 1), (200, 200))}
| mpl-2.0 |
Mandelag/util | AGSMSLayerDownloader/consolidator.py | 1 | 1493 | import os, sys, arcpy, json
def consolidate(directory, output, output_name, batch_size=500):
    """Merge per-feature JSON files into batched Esri-JSON files.

    Reads every file in *directory* (resolved relative to the script's
    directory, as before), concatenates their ``features`` arrays, and
    writes one combined file per *batch_size* inputs into *output*, named
    ``<output_name>_<group>.json``.  The first file of each batch seeds the
    JSON skeleton (spatial reference, field definitions, etc.); subsequent
    files only contribute their ``features``.

    Fixes relative to the original:
      * the final partial batch (fewer than *batch_size* files) is now
        written instead of being silently dropped;
      * the output file name no longer accumulates suffixes across batches
        (previously batch 2 was named ``name_1.json_2.json``);
      * output files go into *output* (the computed ``outputdir`` was
        previously ignored and files landed in the CWD);
      * *batch_size* is a parameter (default 500, matching old behavior).

    Returns the list of output file names written.
    """
    scriptpath = sys.path[0]
    inputdir = os.path.join(scriptpath, directory)
    outputdir = os.path.join(scriptpath, output)

    written = []
    merged = None   # accumulator for the current batch; None == empty batch
    count = 0
    group = 0

    def flush():
        # Write the accumulated batch and record its name.
        out_name = "%s_%d.json" % (output_name, group)
        with open(os.path.join(outputdir, out_name), "w+") as outputfile:
            outputfile.write(json.dumps(merged))
        written.append(out_name)
        print(out_name)

    for filename in os.listdir(inputdir):
        with open(os.path.join(inputdir, filename)) as inputfile:
            data = json.load(inputfile)
        if merged is None:
            merged = data
        else:
            merged["features"].extend(data["features"])
        count += 1
        if count >= batch_size:
            group += 1
            flush()
            merged = None
            count = 0

    # Flush the trailing partial batch, if any.
    if merged is not None:
        group += 1
        flush()

    return written
if __name__ == "__main__":
    # Guard clause: bail out with usage help unless all three positional
    # arguments were supplied.
    if len(sys.argv) <= 3:
        print("Usage: \n python consolidator.py [input_folder] [output_folder] [output_name]")
        exit()
    consolidate(sys.argv[1], sys.argv[2], sys.argv[3])
| gpl-3.0 |
goddino/libjingle | trunk/tools/gyp/test/lib/TestCommon.py | 307 | 21397 | """
TestCommon.py: a testing framework for commands and scripts
with commonly useful error handling
The TestCommon module provides a simple, high-level interface for writing
tests of executable commands and scripts, especially commands and scripts
that interact with the file system. All methods throw exceptions and
exit on failure, with useful error messages. This makes a number of
explicit checks unnecessary, making the test scripts themselves simpler
to write and easier to read.
The TestCommon class is a subclass of the TestCmd class. In essence,
TestCommon is a wrapper that handles common TestCmd error conditions in
useful ways. You can use TestCommon directly, or subclass it for your
program and add additional (or override) methods to tailor it to your
program's specific needs. Alternatively, the TestCommon class serves
as a useful example of how to define your own TestCmd subclass.
As a subclass of TestCmd, TestCommon provides access to all of the
variables and methods from the TestCmd module. Consequently, you can
use any variable or method documented in the TestCmd module without
having to explicitly import TestCmd.
A TestCommon environment object is created via the usual invocation:
import TestCommon
test = TestCommon.TestCommon()
You can use all of the TestCmd keyword arguments when instantiating a
TestCommon object; see the TestCmd documentation for details.
Here is an overview of the methods and keyword arguments that are
provided by the TestCommon class:
test.must_be_writable('file1', ['file2', ...])
test.must_contain('file', 'required text\n')
test.must_contain_all_lines(output, lines, ['title', find])
test.must_contain_any_line(output, lines, ['title', find])
test.must_exist('file1', ['file2', ...])
test.must_match('file', "expected contents\n")
test.must_not_be_writable('file1', ['file2', ...])
test.must_not_contain('file', 'banned text\n')
test.must_not_contain_any_line(output, lines, ['title', find])
test.must_not_exist('file1', ['file2', ...])
test.run(options = "options to be prepended to arguments",
stdout = "expected standard output from the program",
stderr = "expected error output from the program",
status = expected_status,
match = match_function)
The TestCommon module also provides the following variables
TestCommon.python_executable
TestCommon.exe_suffix
TestCommon.obj_suffix
TestCommon.shobj_prefix
TestCommon.shobj_suffix
TestCommon.lib_prefix
TestCommon.lib_suffix
TestCommon.dll_prefix
TestCommon.dll_suffix
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommon.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import copy
import os
import os.path
import stat
import string
import sys
import types
import UserList
from TestCmd import *
from TestCmd import __all__
__all__.extend([ 'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
])
# Variables that describe the prefixes and suffixes on this system.
# (Python 2 module: string.find() is used instead of the str method.)
if sys.platform == 'win32':
    exe_suffix   = '.exe'
    obj_suffix   = '.obj'
    shobj_suffix = '.obj'
    shobj_prefix = ''
    lib_prefix   = ''
    lib_suffix   = '.lib'
    dll_prefix   = ''
    dll_suffix   = '.dll'
elif sys.platform == 'cygwin':
    exe_suffix   = '.exe'
    obj_suffix   = '.o'
    shobj_suffix = '.os'
    shobj_prefix = ''
    lib_prefix   = 'lib'
    lib_suffix   = '.a'
    dll_prefix   = ''
    dll_suffix   = '.dll'
elif string.find(sys.platform, 'irix') != -1:
    exe_suffix   = ''
    obj_suffix   = '.o'
    shobj_suffix = '.o'
    shobj_prefix = ''
    lib_prefix   = 'lib'
    lib_suffix   = '.a'
    dll_prefix   = 'lib'
    dll_suffix   = '.so'
elif string.find(sys.platform, 'darwin') != -1:
    exe_suffix   = ''
    obj_suffix   = '.o'
    shobj_suffix = '.os'
    shobj_prefix = ''
    lib_prefix   = 'lib'
    lib_suffix   = '.a'
    dll_prefix   = 'lib'
    dll_suffix   = '.dylib'
elif string.find(sys.platform, 'sunos') != -1:
    exe_suffix   = ''
    obj_suffix   = '.o'
    shobj_suffix = '.os'
    shobj_prefix = 'so_'
    lib_prefix   = 'lib'
    lib_suffix   = '.a'
    dll_prefix   = 'lib'
    # Fixed: was '.dylib' (the macOS suffix), evidently copy-pasted from the
    # darwin branch above; Solaris shared objects use the ELF '.so' suffix.
    dll_suffix   = '.so'
else:
    exe_suffix   = ''
    obj_suffix   = '.o'
    shobj_suffix = '.os'
    shobj_prefix = ''
    lib_prefix   = 'lib'
    lib_suffix   = '.a'
    dll_prefix   = 'lib'
    dll_suffix   = '.so'
def is_List(e):
    # Python 2 only: true for a builtin list or a UserList wrapper
    # (types.ListType and the UserList module do not exist on Python 3).
    return type(e) is types.ListType \
        or isinstance(e, UserList.UserList)
def is_writable(f):
    """Return a truthy value when the owner-write permission bit is set on *f*.

    Checks the stat mode bits (not effective access), so the result reflects
    the file's permissions regardless of who is asking.
    """
    return os.stat(f).st_mode & stat.S_IWUSR
def separate_files(flist):
    """Split *flist* into an ``(existing, missing)`` pair of path lists.

    Order within each list follows the order of *flist*.
    """
    existing, missing = [], []
    for path in flist:
        (existing if os.path.exists(path) else missing).append(path)
    return existing, missing
def _failed(self, status = 0):
if self.status is None or status is None:
return None
try:
return _status(self) not in status
except TypeError:
# status wasn't an iterable
return _status(self) != status
def _status(self):
return self.status
# NOTE: Python 2 code (print statements, apply(), string.join/find,
# ``except Exception, e``).  Keep any edits Python-2 compatible.
class TestCommon(TestCmd):

    # Additional methods from the Perl Test::Cmd::Common module
    # that we may wish to add in the future:
    #
    #  $test->subdir('subdir', ...);
    #
    #  $test->copy('src_file', 'dst_file');

    def __init__(self, **kw):
        """Initialize a new TestCommon instance.  This involves just
        calling the base class initialization, and then changing directory
        to the workdir.
        """
        apply(TestCmd.__init__, [self], kw)
        os.chdir(self.workdir)

    def must_be_writable(self, *files):
        """Ensures that the specified file(s) exist and are writable.
        An individual file can be specified as a list of directory names,
        in which case the pathname will be constructed by concatenating
        them.  Exits FAILED if any of the files does not exist or is
        not writable.
        """
        files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
        existing, missing = separate_files(files)
        unwritable = filter(lambda x, iw=is_writable: not iw(x), existing)
        if missing:
            print "Missing files: `%s'" % string.join(missing, "', `")
        if unwritable:
            print "Unwritable files: `%s'" % string.join(unwritable, "', `")
        # fail_test is a no-op when the combined list is empty.
        self.fail_test(missing + unwritable)

    def must_contain(self, file, required, mode = 'rb'):
        """Ensures that the specified file contains the required text.
        """
        file_contents = self.read(file, mode)
        contains = (string.find(file_contents, required) != -1)
        if not contains:
            print "File `%s' does not contain required string." % file
            print self.banner('Required string ')
            print required
            print self.banner('%s contents ' % file)
            print file_contents
        self.fail_test(not contains)

    def must_contain_all_lines(self, output, lines, title=None, find=None):
        """Ensures that the specified output string (first argument)
        contains all of the specified lines (second argument).

        An optional third argument can be used to describe the type
        of output being searched, and only shows up in failure output.

        An optional fourth argument can be used to supply a different
        function, of the form "find(line, output), to use when searching
        for lines in the output.
        """
        if find is None:
            find = lambda o, l: string.find(o, l) != -1
        missing = []
        for line in lines:
            if not find(output, line):
                missing.append(line)
        if missing:
            if title is None:
                title = 'output'
            sys.stdout.write("Missing expected lines from %s:\n" % title)
            for line in missing:
                sys.stdout.write('    ' + repr(line) + '\n')
            sys.stdout.write(self.banner(title + ' '))
            sys.stdout.write(output)
            self.fail_test()

    def must_contain_any_line(self, output, lines, title=None, find=None):
        """Ensures that the specified output string (first argument)
        contains at least one of the specified lines (second argument).

        An optional third argument can be used to describe the type
        of output being searched, and only shows up in failure output.

        An optional fourth argument can be used to supply a different
        function, of the form "find(line, output), to use when searching
        for lines in the output.
        """
        if find is None:
            find = lambda o, l: string.find(o, l) != -1
        for line in lines:
            if find(output, line):
                # Any single match satisfies the check.
                return
        if title is None:
            title = 'output'
        sys.stdout.write("Missing any expected line from %s:\n" % title)
        for line in lines:
            sys.stdout.write('    ' + repr(line) + '\n')
        sys.stdout.write(self.banner(title + ' '))
        sys.stdout.write(output)
        self.fail_test()

    def must_contain_lines(self, lines, output, title=None):
        # Deprecated; retain for backwards compatibility.
        # Note the reversed (lines, output) argument order vs. the newer API.
        return self.must_contain_all_lines(output, lines, title)

    def must_exist(self, *files):
        """Ensures that the specified file(s) must exist.  An individual
        file be specified as a list of directory names, in which case the
        pathname will be constructed by concatenating them.  Exits FAILED
        if any of the files does not exist.
        """
        files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
        missing = filter(lambda x: not os.path.exists(x), files)
        if missing:
            print "Missing files: `%s'" % string.join(missing, "', `")
            self.fail_test(missing)

    def must_match(self, file, expect, mode = 'rb'):
        """Matches the contents of the specified file (first argument)
        against the expected contents (second argument).  The expected
        contents are a list of lines or a string which will be split
        on newlines.
        """
        file_contents = self.read(file, mode)
        try:
            self.fail_test(not self.match(file_contents, expect))
        except KeyboardInterrupt:
            raise
        except:
            # Any mismatch/assertion surfaces here; show a diff, then
            # re-raise so the failure still propagates.
            print "Unexpected contents of `%s'" % file
            self.diff(expect, file_contents, 'contents ')
            raise

    def must_not_contain(self, file, banned, mode = 'rb'):
        """Ensures that the specified file doesn't contain the banned text.
        """
        file_contents = self.read(file, mode)
        contains = (string.find(file_contents, banned) != -1)
        if contains:
            print "File `%s' contains banned string." % file
            print self.banner('Banned string ')
            print banned
            print self.banner('%s contents ' % file)
            print file_contents
        self.fail_test(contains)

    def must_not_contain_any_line(self, output, lines, title=None, find=None):
        """Ensures that the specified output string (first argument)
        does not contain any of the specified lines (second argument).

        An optional third argument can be used to describe the type
        of output being searched, and only shows up in failure output.

        An optional fourth argument can be used to supply a different
        function, of the form "find(line, output), to use when searching
        for lines in the output.
        """
        if find is None:
            find = lambda o, l: string.find(o, l) != -1
        unexpected = []
        for line in lines:
            if find(output, line):
                unexpected.append(line)
        if unexpected:
            if title is None:
                title = 'output'
            sys.stdout.write("Unexpected lines in %s:\n" % title)
            for line in unexpected:
                sys.stdout.write('    ' + repr(line) + '\n')
            sys.stdout.write(self.banner(title + ' '))
            sys.stdout.write(output)
            self.fail_test()

    def must_not_contain_lines(self, lines, output, title=None):
        # Deprecated alias with reversed argument order; see
        # must_not_contain_any_line.
        return self.must_not_contain_any_line(output, lines, title)

    def must_not_exist(self, *files):
        """Ensures that the specified file(s) must not exist.
        An individual file be specified as a list of directory names, in
        which case the pathname will be constructed by concatenating them.
        Exits FAILED if any of the files exists.
        """
        files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
        existing = filter(os.path.exists, files)
        if existing:
            print "Unexpected files exist: `%s'" % string.join(existing, "', `")
            self.fail_test(existing)

    def must_not_be_writable(self, *files):
        """Ensures that the specified file(s) exist and are not writable.
        An individual file can be specified as a list of directory names,
        in which case the pathname will be constructed by concatenating
        them.  Exits FAILED if any of the files does not exist or is
        writable.
        """
        files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
        existing, missing = separate_files(files)
        writable = filter(is_writable, existing)
        if missing:
            print "Missing files: `%s'" % string.join(missing, "', `")
        if writable:
            print "Writable files: `%s'" % string.join(writable, "', `")
        self.fail_test(missing + writable)

    def _complete(self, actual_stdout, expected_stdout,
                  actual_stderr, expected_stderr, status, match):
        """
        Post-processes running a subcommand, checking for failure
        status and displaying output appropriately.
        """
        if _failed(self, status):
            expect = ''
            if status != 0:
                expect = " (expected %s)" % str(status)
            print "%s returned %s%s" % (self.program, str(_status(self)), expect)
            print self.banner('STDOUT ')
            print actual_stdout
            print self.banner('STDERR ')
            print actual_stderr
            self.fail_test()
        if not expected_stdout is None and not match(actual_stdout, expected_stdout):
            self.diff(expected_stdout, actual_stdout, 'STDOUT ')
            if actual_stderr:
                print self.banner('STDERR ')
                print actual_stderr
            self.fail_test()
        if not expected_stderr is None and not match(actual_stderr, expected_stderr):
            print self.banner('STDOUT ')
            print actual_stdout
            self.diff(expected_stderr, actual_stderr, 'STDERR ')
            self.fail_test()

    def start(self, program = None,
              interpreter = None,
              arguments = None,
              universal_newlines = None,
              **kw):
        """
        Starts a program or script for the test environment.

        This handles the "options" keyword argument and exceptions.
        """
        options = kw.pop('options', None)
        if options:
            if arguments is None:
                arguments = options
            else:
                arguments = options + " " + arguments
        try:
            return apply(TestCmd.start,
                         (self, program, interpreter, arguments, universal_newlines),
                         kw)
        except KeyboardInterrupt:
            raise
        except Exception, e:
            # Dump whatever output we already captured before re-raising, so
            # the failure context isn't lost.
            print self.banner('STDOUT ')
            try:
                print self.stdout()
            except IndexError:
                pass
            print self.banner('STDERR ')
            try:
                print self.stderr()
            except IndexError:
                pass
            cmd_args = self.command_args(program, interpreter, arguments)
            sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
            raise e

    def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
        """
        Finishes and waits for the process being run under control of
        the specified popen argument.  Additional arguments are similar
        to those of the run() method:

                stdout  The expected standard output from
                        the command.  A value of None means
                        don't test standard output.

                stderr  The expected error output from
                        the command.  A value of None means
                        don't test error output.

                status  The expected exit status from the
                        command.  A value of None means don't
                        test exit status.
        """
        apply(TestCmd.finish, (self, popen,), kw)
        match = kw.get('match', self.match)
        self._complete(self.stdout(), stdout,
                       self.stderr(), stderr, status, match)

    def run(self, options = None, arguments = None,
            stdout = None, stderr = '', status = 0, **kw):
        """Runs the program under test, checking that the test succeeded.

        The arguments are the same as the base TestCmd.run() method,
        with the addition of:

                options Extra options that get appended to the beginning
                        of the arguments.

                stdout  The expected standard output from
                        the command.  A value of None means
                        don't test standard output.

                stderr  The expected error output from
                        the command.  A value of None means
                        don't test error output.

                status  The expected exit status from the
                        command.  A value of None means don't
                        test exit status.

        By default, this expects a successful exit (status = 0), does
        not test standard output (stdout = None), and expects that error
        output is empty (stderr = "").
        """
        if options:
            if arguments is None:
                arguments = options
            else:
                arguments = options + " " + arguments
        kw['arguments'] = arguments
        match = kw.pop('match', self.match)
        apply(TestCmd.run, [self], kw)
        self._complete(self.stdout(), stdout,
                       self.stderr(), stderr, status, match)

    def skip_test(self, message="Skipping test.\n"):
        """Skips a test.

        Proper test-skipping behavior is dependent on the external
        TESTCOMMON_PASS_SKIPS environment variable.  If set, we treat
        the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
        In either case, we print the specified message as an indication
        that the substance of the test was skipped.

        (This was originally added to support development under Aegis.
        Technically, skipping a test is a NO RESULT, but Aegis would
        treat that as a test failure and prevent the change from going to
        the next step.  Since we didn't want to force anyone using Aegis
        to have to install absolutely every tool used by the tests, we
        would actually report to Aegis that a skipped test has PASSED
        so that the workflow isn't held up.)
        """
        if message:
            sys.stdout.write(message)
            sys.stdout.flush()
        pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
        if pass_skips in [None, 0, '0']:
            # skip=1 means skip this function when showing where this
            # result came from.  They only care about the line where the
            # script called test.skip_test(), not the line number where
            # we call test.no_result().
            self.no_result(skip=1)
        else:
            # We're under the development directory for this change,
            # so this is an Aegis invocation; pass the test (exit 0).
            self.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
eddyb/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/_code/_py2traceback.py | 192 | 2887 | # copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
import types
def format_exception_only(etype, value):
    """Format the exception part of a traceback.

    The arguments are the exception type and value such as given by
    sys.last_type and sys.last_value. The return value is a list of
    strings, each ending in a newline.

    Normally, the list contains a single string; however, for
    SyntaxError exceptions, it contains several lines that (when
    printed) display detailed information about where the syntax
    error occurred.

    The message indicating which exception occurred is always the last
    string in the list.
    """
    # An instance should not have a meaningful value parameter, but
    # sometimes does, particularly for string exceptions, such as
    # >>> raise string1, string2 # deprecated
    #
    # Clear these out first because issubtype(string1, SyntaxError)
    # would throw another exception and mask the original problem.
    if (isinstance(etype, BaseException) or
        isinstance(etype, types.InstanceType) or
        etype is None or type(etype) is str):
        return [_format_final_exc_line(etype, value)]

    stype = etype.__name__

    if not issubclass(etype, SyntaxError):
        return [_format_final_exc_line(stype, value)]

    # It was a syntax error; show exactly where the problem was found.
    # NOTE(review): the leading spaces in the literals below were restored to
    # the standard traceback layout (2 for "File", 4 for the source line,
    # 3+caret padding, per the "only three spaces" note) — confirm upstream.
    lines = []
    try:
        msg, (filename, lineno, offset, badline) = value.args
    except Exception:
        # value didn't carry the (msg, location) tuple; fall through to the
        # plain one-line rendering below.
        pass
    else:
        filename = filename or "<string>"
        lines.append('  File "%s", line %d\n' % (filename, lineno))
        if badline is not None:
            if isinstance(badline, bytes):  # python 2 only
                badline = badline.decode('utf-8', 'replace')
            lines.append(u'    %s\n' % badline.strip())
            if offset is not None:
                caretspace = badline.rstrip('\n')[:offset].lstrip()
                # non-space whitespace (likes tabs) must be kept for alignment
                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
                # only three spaces to account for offset1 == pos 0
                lines.append('   %s^\n' % ''.join(caretspace))
            value = msg
    lines.append(_format_final_exc_line(stype, value))
    return lines
def _format_final_exc_line(etype, value):
    """Return a single formatted line -- the normal case for
    format_exception_only.

    The result is ``"<etype>\\n"`` when *value* is None or stringifies to
    an empty string, and ``"<etype>: <value>\\n"`` otherwise.

    (Fixed docstring: the previous one claimed a *list* was returned,
    but this function has always returned a plain string; the caller
    wraps it in a list.)
    """
    valuestr = _some_str(value)
    if value is None or not valuestr:
        line = "%s\n" % etype
    else:
        line = "%s: %s\n" % (etype, valuestr)
    return line
def _some_str(value):
    """Best-effort conversion of *value* to a string; never raises.

    Tries unicode() first (Python 2); on Python 3 the resulting
    NameError is caught and str() is used instead. If both fail, a
    placeholder naming the value's type is returned.
    """
    try:
        return unicode(value)
    except Exception:
        try:
            return str(value)
        except Exception:
            pass
    return '<unprintable %s object>' % type(value).__name__
| mpl-2.0 |
stuartarchibald/numba | numba/core/types/abstract.py | 5 | 14721 | from abc import ABCMeta, abstractmethod, abstractproperty
from typing import Dict as ptDict, Type as ptType
import itertools
import weakref
import numpy as np
from numba.core.utils import cached_property
# Types are added to a global registry (_typecache) in order to assign
# them unique integer codes for fast matching in _dispatcher.c.
# However, we also want types to be disposable, therefore we ensure
# each type is interned as a weak reference, so that it lives only as
# long as necessary to keep a stable type code.
# NOTE: some types can still be made immortal elsewhere (for example
# in _dispatcher.c's internal caches).
# Global monotonically-increasing counter that hands out type codes.
_typecodes = itertools.count()
def _autoincr():
    """Allocate and return the next unique integer type code."""
    code = next(_typecodes)
    # 4 billion types should be enough, right?
    assert code < 2 ** 32, "Limited to 4 billion types"
    return code
# Registry of interned types: maps each live Type's weakref to itself.
_typecache: ptDict[weakref.ref, weakref.ref] = {}
def _on_type_disposal(wr, _pop=_typecache.pop):
    # Weakref callback: remove the dead entry from the cache.  The pop
    # method is bound as a default argument so it remains reachable even
    # if the module globals have been cleared at interpreter shutdown.
    _pop(wr, None)
class _TypeMetaclass(ABCMeta):
    """
    A metaclass that will intern instances after they are created.
    This is done by first creating a new instance (including calling
    __init__, which sets up the required attributes for equality
    and hashing), then looking it up in the _typecache registry.
    """
    def __init__(cls, name, bases, orig_vars):
        # __init__ is hooked to mark whether a Type class being defined is a
        # Numba internal type (one which is defined somewhere under the `numba`
        # module) or an external type (one which is defined elsewhere, for
        # example a user defined type).
        super(_TypeMetaclass, cls).__init__(name, bases, orig_vars)
        root = (cls.__module__.split('.'))[0]
        cls._is_internal = root == "numba"
    def _intern(cls, inst):
        # Try to intern the created instance
        # Lookup relies on Type.__eq__/__hash__ via the weakref's
        # equality forwarding; a hit returns the canonical instance.
        wr = weakref.ref(inst, _on_type_disposal)
        orig = _typecache.get(wr)
        orig = orig and orig()
        if orig is not None:
            return orig
        else:
            # First time we see this type: assign it a fresh code and
            # register it (the weakref callback cleans the entry up).
            inst._code = _autoincr()
            _typecache[wr] = wr
            return inst
    def __call__(cls, *args, **kwargs):
        """
        Instantiate *cls* (a Type subclass, presumably) and intern it.
        If an interned instance already exists, it is returned, otherwise
        the new instance is returned.
        """
        inst = type.__call__(cls, *args, **kwargs)
        return cls._intern(inst)
def _type_reconstructor(reconstructor, reconstructor_args, state):
    """
    Rebuild function for unpickling types.

    Re-creates the instance, restores its attribute state, then re-interns
    it so that unpickling always yields the canonical cached type object.
    """
    instance = reconstructor(*reconstructor_args)
    if state:
        instance.__dict__.update(state)
    return type(instance)._intern(instance)
class Type(metaclass=_TypeMetaclass):
    """
    The base class for all Numba types.
    It is essential that proper equality comparison is implemented. The
    default implementation uses the "key" property (overridable in subclasses)
    for both comparison and hashing, to ensure sane behaviour.
    """
    # Whether instances of this type can be mutated in nopython mode.
    mutable = False
    # Rather the type is reflected at the python<->nopython boundary
    reflected = False
    def __init__(self, name):
        # The name doubles as the default equality/hash key (see `key`).
        self.name = name
    @property
    def key(self):
        """
        A property used for __eq__, __ne__ and __hash__. Can be overridden
        in subclasses.
        """
        return self.name
    @property
    def mangling_args(self):
        """
        Returns `(basename, args)` where `basename` is the name of the type
        and `args` is a sequence of parameters of the type.
        Subclass should override to specialize the behavior.
        By default, this returns `(self.name, ())`.
        """
        return self.name, ()
    def __repr__(self):
        return self.name
    def __hash__(self):
        return hash(self.key)
    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.key == other.key
    def __ne__(self, other):
        return not (self == other)
    def __reduce__(self):
        # Route unpickling through _type_reconstructor so the restored
        # object is re-interned in _typecache.
        reconstructor, args, state = super(Type, self).__reduce__()
        return (_type_reconstructor, (reconstructor, args, state))
    def unify(self, typingctx, other):
        """
        Try to unify this type with the *other*. A third type must
        be returned, or None if unification is not possible.
        Only override this if the coercion logic cannot be expressed
        as simple casting rules.
        """
        return None
    def can_convert_to(self, typingctx, other):
        """
        Check whether this type can be converted to the *other*.
        If successful, must return a string describing the conversion, e.g.
        "exact", "promote", "unsafe", "safe"; otherwise None is returned.
        """
        return None
    def can_convert_from(self, typingctx, other):
        """
        Similar to *can_convert_to*, but in reverse. Only needed if
        the type provides conversion from other types.
        """
        return None
    def is_precise(self):
        """
        Whether this type is precise, i.e. can be part of a successful
        type inference. Default implementation returns True.
        """
        return True
    def augment(self, other):
        """
        Augment this type with the *other*. Return the augmented type,
        or None if not supported.
        """
        return None
    # User-facing helpers. These are not part of the core Type API but
    # are provided so that users can write e.g. `numba.boolean(1.5)`
    # (returns True) or `types.int32(types.int32[:])` (returns something
    # usable as a function signature).
    def __call__(self, *args):
        # Single non-Type argument: cast a Python value to this type.
        # Otherwise: build a function signature with this type as return.
        from numba.core.typing import signature
        if len(args) == 1 and not isinstance(args[0], Type):
            return self.cast_python_value(args[0])
        return signature(self, # return_type
                         *args)
    def __getitem__(self, args):
        """
        Return an array of this type.
        """
        from numba.core.types import Array
        ndim, layout = self._determine_array_spec(args)
        return Array(dtype=self, ndim=ndim, layout=layout)
    def _determine_array_spec(self, args):
        # XXX non-contiguous by default, even for 1d arrays,
        # doesn't sound very intuitive
        def validate_slice(s):
            # Only bare "::step" slices are accepted (no start/stop).
            return isinstance(s, slice) and s.start is None and s.stop is None
        if isinstance(args, (tuple, list)) and all(map(validate_slice, args)):
            ndim = len(args)
            # "::1" on the first axis means Fortran order, on the last
            # axis C order; otherwise layout is unknown ('A').
            if args[0].step == 1:
                layout = 'F'
            elif args[-1].step == 1:
                layout = 'C'
            else:
                layout = 'A'
        elif validate_slice(args):
            ndim = 1
            if args.step == 1:
                layout = 'C'
            else:
                layout = 'A'
        else:
            # Raise a KeyError to not be handled by collection constructors (e.g. list).
            raise KeyError(f"Can only index numba types with slices with no start or stop, got {args}.")
        return ndim, layout
    def cast_python_value(self, args):
        # Subclasses that support value casting must override this.
        raise NotImplementedError
    @property
    def is_internal(self):
        """ Returns True if this class is an internally defined Numba type by
        virtue of the module in which it is instantiated, False else."""
        return self._is_internal
    def dump(self, tab=''):
        # Debugging helper: print the interned code and name.
        print(f'{tab}DUMP {type(self).__name__}[code={self._code}, name={self.name}]')
# XXX we should distinguish between Dummy (no meaningful
# representation, e.g. None or a builtin function) and Opaque (has a
# meaningful representation, e.g. ExternalFunctionPointer)
class Dummy(Type):
    """
    Base class for types that do not really have a representation and are
    compatible with a void*.
    """
class Hashable(Type):
    """
    Base class for hashable types.
    """
class Number(Hashable):
    """
    Base class for number types.
    """
    def unify(self, typingctx, other):
        """
        Unify the two number types using Numpy's rules.

        Returns the unified Numba type, or None (implicitly) when *other*
        is not a Number.
        """
        from numba.np import numpy_support
        if isinstance(other, Number):
            # XXX: this can produce unsafe conversions,
            # e.g. would unify {int64, uint64} to float64
            a = numpy_support.as_dtype(self)
            b = numpy_support.as_dtype(other)
            sel = np.promote_types(a, b)
            return numpy_support.from_dtype(sel)
class Callable(Type):
    """
    Base class for callables.
    """
    @abstractmethod
    def get_call_type(self, context, args, kws):
        """
        Using the typing *context*, resolve the callable's signature for
        the given arguments. A signature object is returned, or None.
        """
    @abstractmethod
    def get_call_signatures(self):
        """
        Returns a tuple of (list of signatures, parameterized)
        """
class DTypeSpec(Type):
    """
    Base class for types usable as "dtype" arguments to various Numpy APIs
    (e.g. np.empty()).
    """
    # NOTE: abstractproperty is deprecated in favour of
    # @property + @abstractmethod, but is kept here for consistency
    # with the rest of this module.
    @abstractproperty
    def dtype(self):
        """
        The actual dtype denoted by this dtype spec (a Type instance).
        """
class IterableType(Type):
    """
    Base class for iterable types.
    """
    @abstractproperty
    def iterator_type(self):
        """
        The iterator type obtained when calling iter() (explicitly or implicitly).
        """
class Sized(Type):
    """
    Base class for objects that support len()
    """
class ConstSized(Sized):
    """
    For types that have a constant size
    """
    @abstractmethod
    def __len__(self):
        pass
class IteratorType(IterableType):
    """
    Base class for all iterator types.
    Derived classes should implement the *yield_type* attribute.
    """
    def __init__(self, name, **kwargs):
        super(IteratorType, self).__init__(name, **kwargs)
    @abstractproperty
    def yield_type(self):
        """
        The type of values yielded by the iterator.
        """
    # This is a property to avoid recursivity (for pickling)
    @property
    def iterator_type(self):
        # An iterator is its own iterator type (iter(it) is it).
        return self
class Container(Sized, IterableType):
    """
    Base class for container types.
    """
class Sequence(Container):
    """
    Base class for 1d sequence types. Instances should have the *dtype*
    attribute.
    """
class MutableSequence(Sequence):
    """
    Base class for 1d mutable sequence types. Instances should have the
    *dtype* attribute.
    """
class ArrayCompatible(Type):
    """
    Type class for Numpy array-compatible objects (typically, objects
    exposing an __array__ method).
    Derived classes should implement the *as_array* attribute.
    """
    # If overridden by a subclass, it should also implement typing
    # for '__array_wrap__' with arguments (input, formal result).
    array_priority = 0.0
    @abstractproperty
    def as_array(self):
        """
        The equivalent array type, for operations supporting array-compatible
        objects (such as ufuncs).
        """
    # For compatibility with types.Array
    # These delegate to the equivalent array type; cached because type
    # instances are interned and immutable.
    @cached_property
    def ndim(self):
        return self.as_array.ndim
    @cached_property
    def layout(self):
        return self.as_array.layout
    @cached_property
    def dtype(self):
        return self.as_array.dtype
class Literal(Type):
    """Base class for Literal types.
    Literal types contain the original Python value in the type.
    A literal type should always be constructed from the `literal(val)`
    function.
    """
    # *ctor_map* is a dictionary mapping Python types to Literal subclasses
    # for constructing a numba type for a given Python type.
    # It is used in `literal(val)` function.
    # To add new Literal subclass, register a new mapping to this dict.
    ctor_map: ptDict[type, ptType['Literal']] = {}
    # *_literal_type_cache* is used to cache the numba type of the given value.
    # Class-level default; the resolved type is stored on the instance the
    # first time `literal_type` is accessed.
    _literal_type_cache = None
    def __init__(self, value):
        # Abstract-base guard: only subclasses may be instantiated.
        if type(self) is Literal:
            raise TypeError(
                "Cannot be constructed directly. "
                "Use `numba.types.literal(value)` instead",
            )
        self._literal_init(value)
        fmt = "Literal[{}]({})"
        super(Literal, self).__init__(fmt.format(type(value).__name__, value))
    def _literal_init(self, value):
        self._literal_value = value
        # We want to support constants of non-hashable values, therefore
        # fall back on the value's id() if necessary.
        try:
            hash(value)
        except TypeError:
            self._key = id(value)
        else:
            self._key = value
    @property
    def literal_value(self):
        return self._literal_value
    @property
    def literal_type(self):
        # Lazily resolve (and then cache) the Numba type of the wrapped
        # Python value.
        if self._literal_type_cache is None:
            from numba.core import typing
            ctx = typing.Context()
            try:
                res = ctx.resolve_value_type(self.literal_value)
            except ValueError:
                # Not all literal types have a literal_value that can be
                # resolved to a type, for example, LiteralStrKeyDict has a
                # literal_value that is a python dict for which there's no
                # `typeof` support.
                msg = "{} has no attribute 'literal_type'".format(self)
                raise AttributeError(msg)
            self._literal_type_cache = res
        return self._literal_type_cache
class TypeRef(Dummy):
    """Reference to a type.
    Used when a type is passed as a value.
    """
    def __init__(self, instance_type):
        # The type object this reference stands for.
        self.instance_type = instance_type
        super(TypeRef, self).__init__('typeref[{}]'.format(self.instance_type))
    @property
    def key(self):
        # Equality/hashing of a TypeRef is that of the referenced type.
        return self.instance_type
class InitialValue(object):
    """
    Mixin for types that may carry a known initial value, exposed through
    the read-only .initial_value attribute.
    """
    def __init__(self, initial_value):
        # Stored privately; read back via the property below.
        self._initial_value = initial_value
    @property
    def initial_value(self):
        """The initial value carried by this type (may be None)."""
        return self._initial_value
class Poison(Type):
    """
    This is the "bottom" type in the type system. It won't unify and it's
    unliteral version is Poison of itself. It's advisable for debugging purposes
    to call the constructor with the type that's being poisoned (for whatever
    reason) but this isn't strictly required.
    """
    def __init__(self, ty):
        # Keep the poisoned type (or any debugging payload) so it shows
        # up in the type name.
        self.ty = ty
        super(Poison, self).__init__(name="Poison<%s>" % ty)
    def __unliteral__(self):
        # unliteral(Poison(x)) is Poison(Poison(x)): poison propagates.
        return Poison(self)
    def unify(self, typingctx, other):
        # Poison never unifies with anything.
        return None
| bsd-2-clause |
boztalay/MeshnetTest | node.py | 1 | 7721 | from Tkinter import *
from basics import *
# Drawing constants: node square half-size and colors.
NODE_RADIUS = 5
NODE_COLOR = "green"
NODE_PENDING_COLOR = "purple"
CONNECTION_COLOR = "yellow"
# Maximum number of recently-sent packets remembered per node (used for
# cycle detection in Node.update).
PACKETS_SENT_MAX = 100
class NodeError(Exception):
    """Raised for invalid node operations (self-connect, duplicate
    connect, disconnect of a missing connection)."""
    pass
class Node:
    """A mesh-network node: buffers packets and greedily routes them to
    the connected neighbor closest to each packet's destination, with
    dead-end backtracking and simple cycle avoidance."""
    def __init__(self, location):
        self.location = location
        # Outgoing Connection objects.
        self.connections = []
        # Packets waiting to be routed on the next update().
        self.packetBuffer = []
        # Recently forwarded packets (bounded by PACKETS_SENT_MAX).
        self.packetsSent = []
        # Per-destination routing memory: connections tried / failed.
        self.connectionsTriedForDests = {}
        self.connectionsFailedForDests = {}
        self.isPendingAction = False
    def addPacketToBuffer(self, packet, sourceNode):
        # If this is the first time this node has gotten a packet for this destination,
        # this will ensure that the connection to the node that first send this node
        # a packet with this destination will be at the front of the list of connections
        # to try for it. This makes sure that 1) the packet isn't just sent back to that node
        # and 2) we can default to sending the packet back to it if all other connections
        # have been tried. If the packet is marked as hitting a dead end, we add that
        # connection to a list of failed connections for that destination.
        if packet.destNode not in self.connectionsTriedForDests:
            connectionsTriedForDest = []
            self.connectionsTriedForDests[packet.destNode] = connectionsTriedForDest
        else:
            connectionsTriedForDest = self.connectionsTriedForDests[packet.destNode]
        if packet.destNode not in self.connectionsFailedForDests:
            connectionsFailedForDest = []
            self.connectionsFailedForDests[packet.destNode] = connectionsFailedForDest
        else:
            connectionsFailedForDest = self.connectionsFailedForDests[packet.destNode]
        # Find our connection back to the node that handed us the packet.
        connectionPacketCameFrom = None
        for connection in self.connections:
            if connection.destNode is sourceNode:
                connectionPacketCameFrom = connection
        if connectionPacketCameFrom is not None:
            if connectionPacketCameFrom not in connectionsTriedForDest:
                connectionsTriedForDest.append(connectionPacketCameFrom)
        if packet.foundDeadEnd:
            # The packet bounced back: mark the return connection failed.
            packet.foundDeadEnd = False
            if connectionPacketCameFrom not in connectionsFailedForDest:
                connectionsFailedForDest.append(connectionPacketCameFrom)
        self.packetBuffer.append(packet)
    def setPendingAction(self):
        self.isPendingAction = True
    def clearPendingAction(self):
        self.isPendingAction = False
    def connectTo(self, destNode):
        # Create a one-way connection to destNode (no self/duplicate links).
        self.clearPendingAction()
        if destNode is self:
            raise NodeError("Tried to connect a node to itself")
        for connection in self.connections:
            if connection.destNode is destNode:
                raise NodeError("Tried to connect to a node that already has a connection")
        self.connections.append(Connection(self, destNode))
    def disconnectFrom(self, destNode):
        for connection in self.connections:
            if connection.destNode is destNode:
                self.connections.remove(connection)
                return
        raise NodeError("Tried to disconnect from a node that doesn't have a connection")
    def update(self):
        # Route every buffered packet; packets with nowhere to go stay
        # buffered for the next tick.
        unsendablePackets = []
        for packet in self.packetBuffer:
            if packet.destNode is self:
                self.receivePacket(packet)
                continue
            # Greedy choice: try neighbors nearest the destination first.
            sortedConnectionsForDest = sorted(self.connections, key=lambda connection: connection.destNode.distanceTo(packet.destNode))
            connectionsTriedForDest = self.connectionsTriedForDests[packet.destNode]
            connectionsFailedForDest = self.connectionsFailedForDests[packet.destNode]
            connectionsToIgnore = []
            if len(connectionsTriedForDest) > 0:
                connectionsToIgnore.append(connectionsTriedForDest[0])
            if packet in self.packetsSent:
                # This means this node got a packet that it's already sent out,
                # so there's probably a cycle in the connection it tried last.
                # This will remove that connection from consideration (the last one tried)
                connectionsToIgnore.append(connectionsTriedForDest[-1])
            couldSend = False
            for connection in sortedConnectionsForDest:
                if connection not in connectionsFailedForDest and connection not in connectionsToIgnore:
                    connection.sendPacket(packet)
                    connectionsTriedForDest.append(connection)
                    couldSend = True
                    self.packetsSent.append(packet)
                    if len(self.packetsSent) > PACKETS_SENT_MAX:
                        self.packetsSent.pop(0)
                    break
            if not couldSend:
                if len(connectionsTriedForDest) > 0:
                    # No connections left to try, send it back to the node we got it from
                    # Index 0 will always be the first node that sent a packet with this destination
                    # Don't add the packet to the packets sent list, we aren't sending it on
                    packet.foundDeadEnd = True
                    connectionsTriedForDest[0].sendPacket(packet)
                elif packet not in unsendablePackets:
                    unsendablePackets.append(packet)
        self.packetBuffer = unsendablePackets
    def updateConnections(self):
        # Flush each connection's queued packets to its destination node.
        for connection in self.connections:
            connection.update()
    def draw(self, canvas):
        nodeColor = NODE_COLOR
        if self.isPendingAction:
            nodeColor = NODE_PENDING_COLOR
        canvas.create_rectangle(self.location.x - NODE_RADIUS, self.location.y - NODE_RADIUS,
            self.location.x + NODE_RADIUS, self.location.y + NODE_RADIUS, outline=nodeColor)
        if len(self.packetBuffer) > 0:
            # Fill the node with the color of the first buffered packet.
            innerColor = self.packetBuffer[0].makeColor()
            canvas.create_rectangle(self.location.x - (NODE_RADIUS - 2), self.location.y - (NODE_RADIUS - 2),
                self.location.x + (NODE_RADIUS - 2), self.location.y + (NODE_RADIUS - 2), fill=innerColor)
    def receivePacket(self, packet):
        # Delivery hook; Python 2 print statement (this file is Python 2).
        print "Got a packet!"
    def distanceTo(self, otherNode):
        return self.location.distanceTo(otherNode.location)
class Connection:
    """One-way link that queues packets from sourceNode and delivers
    them to destNode when update() is called."""
    def __init__(self, sourceNode, destNode):
        self.sourceNode = sourceNode
        self.destNode = destNode
        self.packetsToSend = []
    def sendPacket(self, packet):
        # Queue only; actual hand-off happens in update().
        self.packetsToSend.append(packet)
    def update(self):
        # Drain the queue.  Note: packets are popped from the end, so
        # delivery is in LIFO order (matching the original behavior).
        deliver = self.destNode.addPacketToBuffer
        while self.packetsToSend:
            deliver(self.packetsToSend.pop(), self.sourceNode)
    def draw(self, canvas):
        src = self.sourceNode.location
        dst = self.destNode.location
        canvas.create_line(src.x, src.y, dst.x, dst.y, fill=CONNECTION_COLOR)
class Packet:
    """A message in flight from sourceNode to destNode."""
    def __init__(self, sourceNode, destNode, message):
        self.sourceNode = sourceNode
        self.destNode = destNode
        # Set when routing hit a dead end and the packet was bounced back.
        self.foundDeadEnd = False
        self.message = message
        self.color = None
    def makeColor(self):
        # Derive (and cache) a display color from the endpoint
        # coordinates: four 6-bit fields packed into 24 bits of RGB.
        if self.color is None:
            packed = 0
            for coord in (self.sourceNode.location.x,
                          self.sourceNode.location.y,
                          self.destNode.location.x,
                          self.destNode.location.y):
                packed = (packed << 6) | (coord & 0x3f)
            self.color = "#%0.6X" % packed
        return self.color
| mit |
devendermishrajio/nova_test_latest | nova/objects/cell_mapping.py | 30 | 3752 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as ovo
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova.objects import base
from nova.objects import fields
# NOTE(danms): Maintain Dict compatibility because of ovo bug 1474952
@base.NovaObjectRegistry.register
class CellMapping(base.NovaTimestampObject, base.NovaObject,
                  ovo.VersionedObjectDictCompat):
    """Versioned object mapping a cell UUID to its message-queue and
    database connection info, backed by the API database."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'transport_url': fields.StringField(),
        'database_connection': fields.StringField(),
        }
    @staticmethod
    def _from_db_object(context, cell_mapping, db_cell_mapping):
        # Copy every declared field from the DB row onto the object and
        # reset the change tracker so it starts clean.
        for key in cell_mapping.fields:
            setattr(cell_mapping, key, db_cell_mapping[key])
        cell_mapping.obj_reset_changes()
        cell_mapping._context = context
        return cell_mapping
    @staticmethod
    def _get_by_uuid_from_db(context, uuid):
        session = db_api.get_api_session()
        with session.begin():
            db_mapping = session.query(api_models.CellMapping).filter_by(
                uuid=uuid).first()
            if not db_mapping:
                raise exception.CellMappingNotFound(uuid=uuid)
        return db_mapping
    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        # Raises CellMappingNotFound if no row matches.
        db_mapping = cls._get_by_uuid_from_db(context, uuid)
        return cls._from_db_object(context, cls(), db_mapping)
    @staticmethod
    def _create_in_db(context, updates):
        session = db_api.get_api_session()
        db_mapping = api_models.CellMapping()
        db_mapping.update(updates)
        db_mapping.save(session)
        return db_mapping
    @base.remotable
    def create(self):
        # Persist only the changed fields, then refresh from the row.
        db_mapping = self._create_in_db(self._context, self.obj_get_changes())
        self._from_db_object(self._context, self, db_mapping)
    @staticmethod
    def _save_in_db(context, uuid, updates):
        session = db_api.get_api_session()
        with session.begin():
            db_mapping = session.query(
                api_models.CellMapping).filter_by(uuid=uuid).first()
            if not db_mapping:
                raise exception.CellMappingNotFound(uuid=uuid)
            db_mapping.update(updates)
            session.add(db_mapping)
        return db_mapping
    @base.remotable
    def save(self):
        changes = self.obj_get_changes()
        db_mapping = self._save_in_db(self._context, self.uuid, changes)
        self._from_db_object(self._context, self, db_mapping)
        self.obj_reset_changes()
    @staticmethod
    def _destroy_in_db(context, uuid):
        session = db_api.get_api_session()
        with session.begin():
            result = session.query(api_models.CellMapping).filter_by(
                uuid=uuid).delete()
            if not result:
                raise exception.CellMappingNotFound(uuid=uuid)
    @base.remotable
    def destroy(self):
        # Raises CellMappingNotFound if the row no longer exists.
        self._destroy_in_db(self._context, self.uuid)
| apache-2.0 |
vmax-feihu/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/test_server.py | 65 | 83484 | """Tests for openid.server.
"""
from openid.server import server
from openid import association, cryptutil, oidutil
from openid.message import Message, OPENID_NS, OPENID2_NS, OPENID1_NS, \
IDENTIFIER_SELECT, no_default, OPENID1_URL_LIMIT
from openid.store import memstore
import cgi
import unittest
import warnings
from urlparse import urlparse
# In general, if you edit or add tests here, try to move in the direction
# of testing smaller units. For testing the external interfaces, we'll be
# developing an implementation-agnostic testing suite.
# for more, see /etc/ssh/moduli
# Alternate Diffie-Hellman modulus/generator pair, used by the tests to
# exercise non-default association session parameters.
ALT_MODULUS = 0xCAADDDEC1667FC68B5FA15D53C4E1532DD24561A1A2D47A12C01ABEA1E00731F6921AAC40742311FDF9E634BB7131BEE1AF240261554389A910425E044E88C8359B010F5AD2B80E29CB1A5B027B19D9E01A6F63A6F45E5D7ED2FF6A2A0085050A7D0CF307C3DB51D2490355907B4427C23A98DF1EB8ABEF2BA209BB7AFFE86A7
ALT_GEN = 5
class CatchLogs(object):
    """Mixin for TestCases: captures oidutil.log output into
    self.messages for the duration of a test."""
    def setUp(self):
        # Swap in our collector; tearDown restores the original logger.
        self.old_logger = oidutil.log
        oidutil.log = self.gotLogMessage
        self.messages = []
    def gotLogMessage(self, message):
        self.messages.append(message)
    def tearDown(self):
        oidutil.log = self.old_logger
class TestProtocolError(unittest.TestCase):
    """Tests for server.ProtocolError encoding: URL redirect vs. HTML
    form vs. key-value form, across OpenID 1 and OpenID 2 messages."""
    def test_browserWithReturnTo(self):
        return_to = "http://rp.unittest/consumer"
        # will be a ProtocolError raised by Decode or CheckIDRequest.answer
        args = Message.fromPostArgs({
            'openid.mode': 'monkeydance',
            'openid.identity': 'http://wagu.unittest/',
            'openid.return_to': return_to,
            })
        e = server.ProtocolError(args, "plucky")
        self.failUnless(e.hasReturnTo())
        expected_args = {
            'openid.mode': ['error'],
            'openid.error': ['plucky'],
            }
        rt_base, result_args = e.encodeToURL().split('?', 1)
        result_args = cgi.parse_qs(result_args)
        self.failUnlessEqual(result_args, expected_args)
    def test_browserWithReturnTo_OpenID2_GET(self):
        return_to = "http://rp.unittest/consumer"
        # will be a ProtocolError raised by Decode or CheckIDRequest.answer
        args = Message.fromPostArgs({
            'openid.ns': OPENID2_NS,
            'openid.mode': 'monkeydance',
            'openid.identity': 'http://wagu.unittest/',
            'openid.claimed_id': 'http://wagu.unittest/',
            'openid.return_to': return_to,
            })
        e = server.ProtocolError(args, "plucky")
        self.failUnless(e.hasReturnTo())
        expected_args = {
            'openid.ns': [OPENID2_NS],
            'openid.mode': ['error'],
            'openid.error': ['plucky'],
            }
        rt_base, result_args = e.encodeToURL().split('?', 1)
        result_args = cgi.parse_qs(result_args)
        self.failUnlessEqual(result_args, expected_args)
    def test_browserWithReturnTo_OpenID2_POST(self):
        return_to = "http://rp.unittest/consumer" + ('x' * OPENID1_URL_LIMIT)
        # will be a ProtocolError raised by Decode or CheckIDRequest.answer
        args = Message.fromPostArgs({
            'openid.ns': OPENID2_NS,
            'openid.mode': 'monkeydance',
            'openid.identity': 'http://wagu.unittest/',
            'openid.claimed_id': 'http://wagu.unittest/',
            'openid.return_to': return_to,
            })
        e = server.ProtocolError(args, "plucky")
        self.failUnless(e.hasReturnTo())
        expected_args = {
            'openid.ns': [OPENID2_NS],
            'openid.mode': ['error'],
            'openid.error': ['plucky'],
            }
        # Over the OpenID 1 URL length limit, an OpenID 2 error must be
        # returned as an auto-submitting HTML form, not a redirect URL.
        self.failUnless(e.whichEncoding() == server.ENCODE_HTML_FORM)
        self.failUnless(e.toFormMarkup() == e.toMessage().toFormMarkup(
            args.getArg(OPENID_NS, 'return_to')))
    def test_browserWithReturnTo_OpenID1_exceeds_limit(self):
        return_to = "http://rp.unittest/consumer" + ('x' * OPENID1_URL_LIMIT)
        # will be a ProtocolError raised by Decode or CheckIDRequest.answer
        args = Message.fromPostArgs({
            'openid.mode': 'monkeydance',
            'openid.identity': 'http://wagu.unittest/',
            'openid.return_to': return_to,
            })
        e = server.ProtocolError(args, "plucky")
        self.failUnless(e.hasReturnTo())
        expected_args = {
            'openid.mode': ['error'],
            'openid.error': ['plucky'],
            }
        # OpenID 1 has no form encoding: even an over-long URL is still
        # URL-encoded.
        self.failUnless(e.whichEncoding() == server.ENCODE_URL)
        rt_base, result_args = e.encodeToURL().split('?', 1)
        result_args = cgi.parse_qs(result_args)
        self.failUnlessEqual(result_args, expected_args)
    def test_noReturnTo(self):
        # will be a ProtocolError raised by Decode or CheckIDRequest.answer
        args = Message.fromPostArgs({
            'openid.mode': 'zebradance',
            'openid.identity': 'http://wagu.unittest/',
            })
        e = server.ProtocolError(args, "waffles")
        self.failIf(e.hasReturnTo())
        expected = """error:waffles
mode:error
"""
        self.failUnlessEqual(e.encodeToKVForm(), expected)
    def test_noMessage(self):
        # An error with no originating message has no return_to and no
        # preferred encoding at all.
        e = server.ProtocolError(None, "no moar pancakes")
        self.failIf(e.hasReturnTo())
        self.failUnlessEqual(e.whichEncoding(), None)
class TestDecode(unittest.TestCase):
def setUp(self):
self.claimed_id = 'http://de.legating.de.coder.unittest/'
self.id_url = "http://decoder.am.unittest/"
self.rt_url = "http://rp.unittest/foobot/?qux=zam"
self.tr_url = "http://rp.unittest/"
self.assoc_handle = "{assoc}{handle}"
self.op_endpoint = 'http://endpoint.unittest/encode'
self.store = memstore.MemoryStore()
self.server = server.Server(self.store, self.op_endpoint)
self.decode = self.server.decoder.decode
self.decode = server.Decoder(self.server).decode
def test_none(self):
args = {}
r = self.decode(args)
self.failUnlessEqual(r, None)
def test_irrelevant(self):
args = {
'pony': 'spotted',
'sreg.mutant_power': 'decaffinator',
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_bad(self):
args = {
'openid.mode': 'twos-compliment',
'openid.pants': 'zippered',
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_dictOfLists(self):
args = {
'openid.mode': ['checkid_setup'],
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.trust_root': self.tr_url,
}
try:
result = self.decode(args)
except TypeError, err:
self.failUnless(str(err).find('values') != -1, err)
else:
self.fail("Expected TypeError, but got result %s" % (result,))
def test_checkidImmediate(self):
args = {
'openid.mode': 'checkid_immediate',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.trust_root': self.tr_url,
# should be ignored
'openid.some.extension': 'junk',
}
r = self.decode(args)
self.failUnless(isinstance(r, server.CheckIDRequest))
self.failUnlessEqual(r.mode, "checkid_immediate")
self.failUnlessEqual(r.immediate, True)
self.failUnlessEqual(r.identity, self.id_url)
self.failUnlessEqual(r.trust_root, self.tr_url)
self.failUnlessEqual(r.return_to, self.rt_url)
self.failUnlessEqual(r.assoc_handle, self.assoc_handle)
def test_checkidSetup(self):
args = {
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.trust_root': self.tr_url,
}
r = self.decode(args)
self.failUnless(isinstance(r, server.CheckIDRequest))
self.failUnlessEqual(r.mode, "checkid_setup")
self.failUnlessEqual(r.immediate, False)
self.failUnlessEqual(r.identity, self.id_url)
self.failUnlessEqual(r.trust_root, self.tr_url)
self.failUnlessEqual(r.return_to, self.rt_url)
def test_checkidSetupOpenID2(self):
args = {
'openid.ns': OPENID2_NS,
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.claimed_id': self.claimed_id,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.realm': self.tr_url,
}
r = self.decode(args)
self.failUnless(isinstance(r, server.CheckIDRequest))
self.failUnlessEqual(r.mode, "checkid_setup")
self.failUnlessEqual(r.immediate, False)
self.failUnlessEqual(r.identity, self.id_url)
self.failUnlessEqual(r.claimed_id, self.claimed_id)
self.failUnlessEqual(r.trust_root, self.tr_url)
self.failUnlessEqual(r.return_to, self.rt_url)
def test_checkidSetupNoClaimedIDOpenID2(self):
args = {
'openid.ns': OPENID2_NS,
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.realm': self.tr_url,
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_checkidSetupNoIdentityOpenID2(self):
args = {
'openid.ns': OPENID2_NS,
'openid.mode': 'checkid_setup',
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.realm': self.tr_url,
}
r = self.decode(args)
self.failUnless(isinstance(r, server.CheckIDRequest))
self.failUnlessEqual(r.mode, "checkid_setup")
self.failUnlessEqual(r.immediate, False)
self.failUnlessEqual(r.identity, None)
self.failUnlessEqual(r.trust_root, self.tr_url)
self.failUnlessEqual(r.return_to, self.rt_url)
def test_checkidSetupNoReturnOpenID1(self):
"""Make sure an OpenID 1 request cannot be decoded if it lacks
a return_to.
"""
args = {
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.trust_root': self.tr_url,
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_checkidSetupNoReturnOpenID2(self):
"""Make sure an OpenID 2 request with no return_to can be
decoded, and make sure a response to such a request raises
NoReturnToError.
"""
args = {
'openid.ns': OPENID2_NS,
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.claimed_id': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.realm': self.tr_url,
}
self.failUnless(isinstance(self.decode(args), server.CheckIDRequest))
req = self.decode(args)
self.assertRaises(server.NoReturnToError, req.answer, False)
self.assertRaises(server.NoReturnToError, req.encodeToURL, 'bogus')
self.assertRaises(server.NoReturnToError, req.getCancelURL)
def test_checkidSetupRealmRequiredOpenID2(self):
"""Make sure that an OpenID 2 request which lacks return_to
cannot be decoded if it lacks a realm. Spec: This value
(openid.realm) MUST be sent if openid.return_to is omitted.
"""
args = {
'openid.ns': OPENID2_NS,
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_checkidSetupBadReturn(self):
args = {
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': 'not a url',
}
try:
result = self.decode(args)
except server.ProtocolError, err:
self.failUnless(err.openid_message)
else:
self.fail("Expected ProtocolError, instead returned with %s" %
(result,))
def test_checkidSetupUntrustedReturn(self):
args = {
'openid.mode': 'checkid_setup',
'openid.identity': self.id_url,
'openid.assoc_handle': self.assoc_handle,
'openid.return_to': self.rt_url,
'openid.trust_root': 'http://not-the-return-place.unittest/',
}
try:
result = self.decode(args)
except server.UntrustedReturnURL, err:
self.failUnless(err.openid_message)
else:
self.fail("Expected UntrustedReturnURL, instead returned with %s" %
(result,))
def test_checkAuth(self):
args = {
'openid.mode': 'check_authentication',
'openid.assoc_handle': '{dumb}{handle}',
'openid.sig': 'sigblob',
'openid.signed': 'identity,return_to,response_nonce,mode',
'openid.identity': 'signedval1',
'openid.return_to': 'signedval2',
'openid.response_nonce': 'signedval3',
'openid.baz': 'unsigned',
}
r = self.decode(args)
self.failUnless(isinstance(r, server.CheckAuthRequest))
self.failUnlessEqual(r.mode, 'check_authentication')
self.failUnlessEqual(r.sig, 'sigblob')
def test_checkAuthMissingSignature(self):
args = {
'openid.mode': 'check_authentication',
'openid.assoc_handle': '{dumb}{handle}',
'openid.signed': 'foo,bar,mode',
'openid.foo': 'signedval1',
'openid.bar': 'signedval2',
'openid.baz': 'unsigned',
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_checkAuthAndInvalidate(self):
args = {
'openid.mode': 'check_authentication',
'openid.assoc_handle': '{dumb}{handle}',
'openid.invalidate_handle': '[[SMART_handle]]',
'openid.sig': 'sigblob',
'openid.signed': 'identity,return_to,response_nonce,mode',
'openid.identity': 'signedval1',
'openid.return_to': 'signedval2',
'openid.response_nonce': 'signedval3',
'openid.baz': 'unsigned',
}
r = self.decode(args)
self.failUnless(isinstance(r, server.CheckAuthRequest))
self.failUnlessEqual(r.invalidate_handle, '[[SMART_handle]]')
def test_associateDH(self):
args = {
'openid.mode': 'associate',
'openid.session_type': 'DH-SHA1',
'openid.dh_consumer_public': "Rzup9265tw==",
}
r = self.decode(args)
self.failUnless(isinstance(r, server.AssociateRequest))
self.failUnlessEqual(r.mode, "associate")
self.failUnlessEqual(r.session.session_type, "DH-SHA1")
self.failUnlessEqual(r.assoc_type, "HMAC-SHA1")
self.failUnless(r.session.consumer_pubkey)
def test_associateDHMissingKey(self):
"""Trying DH assoc w/o public key"""
args = {
'openid.mode': 'associate',
'openid.session_type': 'DH-SHA1',
}
# Using DH-SHA1 without supplying dh_consumer_public is an error.
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_associateDHpubKeyNotB64(self):
args = {
'openid.mode': 'associate',
'openid.session_type': 'DH-SHA1',
'openid.dh_consumer_public': "donkeydonkeydonkey",
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_associateDHModGen(self):
# test dh with non-default but valid values for dh_modulus and dh_gen
args = {
'openid.mode': 'associate',
'openid.session_type': 'DH-SHA1',
'openid.dh_consumer_public': "Rzup9265tw==",
'openid.dh_modulus': cryptutil.longToBase64(ALT_MODULUS),
'openid.dh_gen': cryptutil.longToBase64(ALT_GEN) ,
}
r = self.decode(args)
self.failUnless(isinstance(r, server.AssociateRequest))
self.failUnlessEqual(r.mode, "associate")
self.failUnlessEqual(r.session.session_type, "DH-SHA1")
self.failUnlessEqual(r.assoc_type, "HMAC-SHA1")
self.failUnlessEqual(r.session.dh.modulus, ALT_MODULUS)
self.failUnlessEqual(r.session.dh.generator, ALT_GEN)
self.failUnless(r.session.consumer_pubkey)
def test_associateDHCorruptModGen(self):
# test dh with non-default but valid values for dh_modulus and dh_gen
args = {
'openid.mode': 'associate',
'openid.session_type': 'DH-SHA1',
'openid.dh_consumer_public': "Rzup9265tw==",
'openid.dh_modulus': 'pizza',
'openid.dh_gen': 'gnocchi',
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_associateDHMissingModGen(self):
# test dh with non-default but valid values for dh_modulus and dh_gen
args = {
'openid.mode': 'associate',
'openid.session_type': 'DH-SHA1',
'openid.dh_consumer_public': "Rzup9265tw==",
'openid.dh_modulus': 'pizza',
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
# def test_associateDHInvalidModGen(self):
# # test dh with properly encoded values that are not a valid
# # modulus/generator combination.
# args = {
# 'openid.mode': 'associate',
# 'openid.session_type': 'DH-SHA1',
# 'openid.dh_consumer_public': "Rzup9265tw==",
# 'openid.dh_modulus': cryptutil.longToBase64(9),
# 'openid.dh_gen': cryptutil.longToBase64(27) ,
# }
# self.failUnlessRaises(server.ProtocolError, self.decode, args)
# test_associateDHInvalidModGen.todo = "low-priority feature"
def test_associateWeirdSession(self):
args = {
'openid.mode': 'associate',
'openid.session_type': 'FLCL6',
'openid.dh_consumer_public': "YQ==\n",
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_associatePlain(self):
args = {
'openid.mode': 'associate',
}
r = self.decode(args)
self.failUnless(isinstance(r, server.AssociateRequest))
self.failUnlessEqual(r.mode, "associate")
self.failUnlessEqual(r.session.session_type, "no-encryption")
self.failUnlessEqual(r.assoc_type, "HMAC-SHA1")
def test_nomode(self):
args = {
'openid.session_type': 'DH-SHA1',
'openid.dh_consumer_public': "my public keeey",
}
self.failUnlessRaises(server.ProtocolError, self.decode, args)
def test_invalidns(self):
args = {'openid.ns': 'Tuesday',
'openid.mode': 'associate'}
try:
r = self.decode(args)
except server.ProtocolError, err:
# Assert that the ProtocolError does have a Message attached
# to it, even though the request wasn't a well-formed Message.
self.failUnless(err.openid_message)
# The error message contains the bad openid.ns.
self.failUnless('Tuesday' in str(err), str(err))
else:
self.fail("Expected ProtocolError but returned with %r" % (r,))
class TestEncode(unittest.TestCase):
    """Tests for server.Encoder: whether a response is rendered as a
    redirect (GET), an auto-submitting HTML form (POST), or a direct
    key-value body, and how ProtocolErrors are encoded.

    Modernized: failUnless*/failIf aliases replaced by the current
    unittest assertion names, dict.has_key replaced by ``in``, and the
    identical CheckIDRequest construction (repeated in eight tests)
    extracted into a helper.
    """

    def setUp(self):
        self.encoder = server.Encoder()
        self.encode = self.encoder.encode
        self.op_endpoint = 'http://endpoint.unittest/encode'
        self.store = memstore.MemoryStore()
        self.server = server.Server(self.store, self.op_endpoint)

    def _makeCheckIDRequest(self):
        """Return the standard OpenID 2 CheckIDRequest used by these tests."""
        request = server.CheckIDRequest(
            identity = 'http://bombom.unittest/',
            trust_root = 'http://burr.unittest/',
            return_to = 'http://burr.unittest/999',
            immediate = False,
            op_endpoint = self.server.op_endpoint,
            )
        request.message = Message(OPENID2_NS)
        return request

    def test_id_res_OpenID2_GET(self):
        """
        Check that when an OpenID 2 response does not exceed the
        OpenID 1 message size, a GET response (i.e., redirect) is
        issued.
        """
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'ns': OPENID2_NS,
            'mode': 'id_res',
            'identity': request.identity,
            'claimed_id': request.identity,
            'return_to': request.return_to,
            })

        self.assertFalse(response.renderAsForm())
        self.assertEqual(response.whichEncoding(), server.ENCODE_URL)
        webresponse = self.encode(response)
        self.assertTrue('location' in webresponse.headers)

    def test_id_res_OpenID2_POST(self):
        """
        Check that when an OpenID 2 response exceeds the OpenID 1
        message size, a POST response (i.e., an HTML form) is
        returned.
        """
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'ns': OPENID2_NS,
            'mode': 'id_res',
            'identity': request.identity,
            'claimed_id': request.identity,
            'return_to': 'x' * OPENID1_URL_LIMIT,
            })

        self.assertTrue(response.renderAsForm())
        self.assertTrue(len(response.encodeToURL()) > OPENID1_URL_LIMIT)
        self.assertEqual(response.whichEncoding(), server.ENCODE_HTML_FORM)
        webresponse = self.encode(response)
        self.assertEqual(webresponse.body, response.toFormMarkup())

    def test_toFormMarkup(self):
        """Extra form-tag attributes are passed through to the markup."""
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'ns': OPENID2_NS,
            'mode': 'id_res',
            'identity': request.identity,
            'claimed_id': request.identity,
            'return_to': 'x' * OPENID1_URL_LIMIT,
            })
        form_markup = response.toFormMarkup({'foo':'bar'})
        self.assertTrue(' foo="bar"' in form_markup)

    def test_toHTML(self):
        """toHTML wraps the auto-submitting form in a complete document."""
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'ns': OPENID2_NS,
            'mode': 'id_res',
            'identity': request.identity,
            'claimed_id': request.identity,
            'return_to': 'x' * OPENID1_URL_LIMIT,
            })
        html = response.toHTML()
        self.assertTrue('<html>' in html)
        self.assertTrue('</html>' in html)
        self.assertTrue('<body onload=' in html)
        self.assertTrue('<form' in html)
        self.assertTrue('http://bombom.unittest/' in html)

    def test_id_res_OpenID1_exceeds_limit(self):
        """
        Check that when an OpenID 1 response exceeds the OpenID 1
        message size, a GET response is issued.  Technically, this
        shouldn't be permitted by the library, but this test is in
        place to preserve the status quo for OpenID 1.
        """
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'mode': 'id_res',
            'identity': request.identity,
            'return_to': 'x' * OPENID1_URL_LIMIT,
            })

        self.assertFalse(response.renderAsForm())
        self.assertTrue(len(response.encodeToURL()) > OPENID1_URL_LIMIT)
        self.assertEqual(response.whichEncoding(), server.ENCODE_URL)
        webresponse = self.encode(response)
        self.assertEqual(webresponse.headers['location'],
                         response.encodeToURL())

    def test_id_res(self):
        """A small id_res response redirects back to return_to with
        exactly the response fields in the query string."""
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'mode': 'id_res',
            'identity': request.identity,
            'return_to': request.return_to,
            })
        webresponse = self.encode(response)
        self.assertEqual(webresponse.code, server.HTTP_REDIRECT)
        self.assertTrue('location' in webresponse.headers)

        location = webresponse.headers['location']
        self.assertTrue(location.startswith(request.return_to),
                        "%s does not start with %s" % (location,
                                                       request.return_to))
        # argh.
        q2 = dict(cgi.parse_qsl(urlparse(location)[4]))
        expected = response.fields.toPostArgs()
        self.assertEqual(q2, expected)

    def test_cancel(self):
        """A cancel response is delivered as a redirect."""
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'mode': 'cancel',
            })
        webresponse = self.encode(response)
        self.assertEqual(webresponse.code, server.HTTP_REDIRECT)
        self.assertTrue('location' in webresponse.headers)

    def test_cancelToForm(self):
        """A cancel response can also be rendered as a form."""
        request = self._makeCheckIDRequest()
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'mode': 'cancel',
            })
        form = response.toFormMarkup()
        self.assertTrue(form)

    def test_assocReply(self):
        """An associate response is a direct key-value HTTP 200 body."""
        msg = Message(OPENID2_NS)
        msg.setArg(OPENID2_NS, 'session_type', 'no-encryption')
        request = server.AssociateRequest.fromMessage(msg)
        response = server.OpenIDResponse(request)
        response.fields = Message.fromPostArgs(
            {'openid.assoc_handle': "every-zig"})
        webresponse = self.encode(response)
        body = "assoc_handle:every-zig\n"
        self.assertEqual(webresponse.code, server.HTTP_OK)
        self.assertEqual(webresponse.headers, {})
        self.assertEqual(webresponse.body, body)

    def test_checkauthReply(self):
        """A check_authentication response is a direct key-value body."""
        request = server.CheckAuthRequest('a_sock_monkey',
                                          'siggggg',
                                          [])
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'is_valid': 'true',
            'invalidate_handle': 'xXxX:xXXx'
            })
        body = "invalidate_handle:xXxX:xXXx\nis_valid:true\n"
        webresponse = self.encode(response)
        self.assertEqual(webresponse.code, server.HTTP_OK)
        self.assertEqual(webresponse.headers, {})
        self.assertEqual(webresponse.body, body)

    def test_unencodableError(self):
        """A ProtocolError for a message with no mode cannot be encoded."""
        args = Message.fromPostArgs({
            'openid.identity': 'http://limu.unittest/',
            })
        e = server.ProtocolError(args, "wet paint")
        self.assertRaises(server.EncodingError, self.encode, e)

    def test_encodableError(self):
        """A ProtocolError for a request with a mode encodes as an HTTP
        error response with a key-value body."""
        args = Message.fromPostArgs({
            'openid.mode': 'associate',
            'openid.identity': 'http://limu.unittest/',
            })
        body = "error:snoot\nmode:error\n"
        webresponse = self.encode(server.ProtocolError(args, "snoot"))
        self.assertEqual(webresponse.code, server.HTTP_ERROR)
        self.assertEqual(webresponse.headers, {})
        self.assertEqual(webresponse.body, body)
class TestSigningEncode(unittest.TestCase):
    """Tests for server.SigningEncoder: id_res responses must be signed
    (via a stored association or in dumb mode); cancel and direct
    responses must not carry a signature.

    Modernized assertion names; the thrice-repeated signed-redirect
    check is extracted into a helper.
    """

    def setUp(self):
        self._dumb_key = server.Signatory._dumb_key
        self._normal_key = server.Signatory._normal_key
        self.store = memstore.MemoryStore()
        self.server = server.Server(self.store, "http://signing.unittest/enc")
        self.request = server.CheckIDRequest(
            identity = 'http://bombom.unittest/',
            trust_root = 'http://burr.unittest/',
            return_to = 'http://burr.unittest/999',
            immediate = False,
            op_endpoint = self.server.op_endpoint,
            )
        self.request.message = Message(OPENID2_NS)
        self.response = server.OpenIDResponse(self.request)
        self.response.fields = Message.fromOpenIDArgs({
            'mode': 'id_res',
            'identity': self.request.identity,
            'return_to': self.request.return_to,
            })
        self.signatory = server.Signatory(self.store)
        self.encoder = server.SigningEncoder(self.signatory)
        self.encode = self.encoder.encode

    def _assertSignedRedirect(self, webresponse):
        """Assert the response is a redirect whose query string carries
        sig, assoc_handle and signed fields."""
        self.assertEqual(webresponse.code, server.HTTP_REDIRECT)
        self.assertTrue('location' in webresponse.headers)

        location = webresponse.headers['location']
        query = cgi.parse_qs(urlparse(location)[4])
        self.assertTrue('openid.sig' in query)
        self.assertTrue('openid.assoc_handle' in query)
        self.assertTrue('openid.signed' in query)

    def test_idres(self):
        assoc_handle = '{bicycle}{shed}'
        self.store.storeAssociation(
            self._normal_key,
            association.Association.fromExpiresIn(60, assoc_handle,
                                                  'sekrit', 'HMAC-SHA1'))
        self.request.assoc_handle = assoc_handle
        webresponse = self.encode(self.response)
        self._assertSignedRedirect(webresponse)

    def test_idresDumb(self):
        # No association handle on the request: the signatory falls back
        # to dumb mode and still signs the response.
        webresponse = self.encode(self.response)
        self._assertSignedRedirect(webresponse)

    def test_forgotStore(self):
        # A SigningEncoder without a signatory cannot sign anything.
        self.encoder.signatory = None
        self.assertRaises(ValueError, self.encode, self.response)

    def test_cancel(self):
        request = server.CheckIDRequest(
            identity = 'http://bombom.unittest/',
            trust_root = 'http://burr.unittest/',
            return_to = 'http://burr.unittest/999',
            immediate = False,
            op_endpoint = self.server.op_endpoint,
            )
        request.message = Message(OPENID2_NS)
        response = server.OpenIDResponse(request)
        response.fields.setArg(OPENID_NS, 'mode', 'cancel')
        webresponse = self.encode(response)
        self.assertEqual(webresponse.code, server.HTTP_REDIRECT)
        self.assertTrue('location' in webresponse.headers)
        location = webresponse.headers['location']
        query = cgi.parse_qs(urlparse(location)[4])
        self.assertFalse('openid.sig' in query, response.fields.toPostArgs())

    def test_assocReply(self):
        msg = Message(OPENID2_NS)
        msg.setArg(OPENID2_NS, 'session_type', 'no-encryption')
        request = server.AssociateRequest.fromMessage(msg)
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({'assoc_handle': "every-zig"})
        webresponse = self.encode(response)
        body = "assoc_handle:every-zig\n"
        self.assertEqual(webresponse.code, server.HTTP_OK)
        self.assertEqual(webresponse.headers, {})
        self.assertEqual(webresponse.body, body)

    def test_alreadySigned(self):
        self.response.fields.setArg(OPENID_NS, 'sig', 'priorSig==')
        self.assertRaises(server.AlreadySigned, self.encode, self.response)
class TestCheckID(unittest.TestCase):
    """Tests for server.CheckIDRequest: trust-root validation, the
    answer() contract for positive/negative assertions in OpenID 1 and
    OpenID 2, and decoding via CheckIDRequest.fromMessage.

    Modernized: current unittest assertion names and Python-3-compatible
    ``except E as e`` syntax.
    """

    def setUp(self):
        self.op_endpoint = 'http://endpoint.unittest/'
        self.store = memstore.MemoryStore()
        self.server = server.Server(self.store, self.op_endpoint)
        self.request = server.CheckIDRequest(
            identity = 'http://bambam.unittest/',
            trust_root = 'http://bar.unittest/',
            return_to = 'http://bar.unittest/999',
            immediate = False,
            op_endpoint = self.server.op_endpoint,
            )
        self.request.message = Message(OPENID2_NS)

    def test_trustRootInvalid(self):
        self.request.trust_root = "http://foo.unittest/17"
        self.request.return_to = "http://foo.unittest/39"
        self.assertFalse(self.request.trustRootValid())

    def test_trustRootValid(self):
        self.request.trust_root = "http://foo.unittest/"
        self.request.return_to = "http://foo.unittest/39"
        self.assertTrue(self.request.trustRootValid())

    def test_malformedTrustRoot(self):
        self.request.trust_root = "invalid://trust*root/"
        self.request.return_to = "http://foo.unittest/39"
        sentinel = object()
        self.request.message = sentinel
        try:
            result = self.request.trustRootValid()
        except server.MalformedTrustRoot as why:
            # The exception must carry the request's message object.
            self.assertTrue(sentinel is why.openid_message)
        else:
            self.fail('Expected MalformedTrustRoot exception. Got %r'
                      % (result,))

    def test_trustRootValidNoReturnTo(self):
        request = server.CheckIDRequest(
            identity = 'http://bambam.unittest/',
            trust_root = 'http://bar.unittest/',
            return_to = None,
            immediate = False,
            op_endpoint = self.server.op_endpoint,
            )
        self.assertTrue(request.trustRootValid())

    def test_returnToVerified_callsVerify(self):
        """Make sure that verifyReturnTo is calling the trustroot
        function verifyReturnTo
        """
        def withVerifyReturnTo(new_verify, callable):
            # Temporarily monkey-patch server.verifyReturnTo.
            old_verify = server.verifyReturnTo
            try:
                server.verifyReturnTo = new_verify
                return callable()
            finally:
                server.verifyReturnTo = old_verify

        # Ensure that exceptions are passed through
        sentinel = Exception()
        def vrfyExc(trust_root, return_to):
            self.assertEqual(self.request.trust_root, trust_root)
            self.assertEqual(self.request.return_to, return_to)
            raise sentinel

        try:
            withVerifyReturnTo(vrfyExc, self.request.returnToVerified)
        except Exception as e:
            self.assertTrue(e is sentinel, e)

        # Ensure that True and False are passed through unchanged
        def constVerify(val):
            def verify(trust_root, return_to):
                self.assertEqual(self.request.trust_root, trust_root)
                self.assertEqual(self.request.return_to, return_to)
                return val
            return verify

        for val in [True, False]:
            self.assertEqual(
                val,
                withVerifyReturnTo(constVerify(val),
                                   self.request.returnToVerified))

    def _expectAnswer(self, answer, identity=None, claimed_id=None):
        """Assert *answer* is a well-formed OpenID 2 positive assertion
        for *identity*/*claimed_id*, with no unexpected fields."""
        expected_list = [
            ('mode', 'id_res'),
            ('return_to', self.request.return_to),
            ('op_endpoint', self.op_endpoint),
            ]
        if identity:
            expected_list.append(('identity', identity))
            if claimed_id:
                expected_list.append(('claimed_id', claimed_id))
            else:
                expected_list.append(('claimed_id', identity))

        for k, expected in expected_list:
            actual = answer.fields.getArg(OPENID_NS, k)
            self.assertEqual(actual, expected,
                             "%s: expected %s, got %s" % (k, expected, actual))

        self.assertTrue(answer.fields.hasKey(OPENID_NS, 'response_nonce'))
        self.assertEqual(answer.fields.getOpenIDNamespace(), OPENID2_NS)

        # One for nonce, one for ns
        self.assertEqual(len(answer.fields.toPostArgs()),
                         len(expected_list) + 2,
                         answer.fields.toPostArgs())

    def test_answerAllow(self):
        """Check the fields specified by "Positive Assertions"

        including mode=id_res, identity, claimed_id, op_endpoint, return_to
        """
        answer = self.request.answer(True)
        self.assertEqual(answer.request, self.request)
        self._expectAnswer(answer, self.request.identity)

    def test_answerAllowDelegatedIdentity(self):
        self.request.claimed_id = 'http://delegating.unittest/'
        answer = self.request.answer(True)
        self._expectAnswer(answer, self.request.identity,
                           self.request.claimed_id)

    def test_answerAllowDelegatedIdentity2(self):
        # This time with the identity argument explicitly passed in to
        # answer()
        self.request.claimed_id = 'http://delegating.unittest/'
        answer = self.request.answer(True, identity='http://bambam.unittest/')
        self._expectAnswer(answer, self.request.identity,
                           self.request.claimed_id)

    def test_answerAllowWithoutIdentityReally(self):
        self.request.identity = None
        answer = self.request.answer(True)
        self.assertEqual(answer.request, self.request)
        self._expectAnswer(answer)

    def test_answerAllowAnonymousFail(self):
        self.request.identity = None
        # XXX - Check on this, I think this behavior is legal in OpenID 2.0?
        self.assertRaises(
            ValueError, self.request.answer, True, identity="=V")

    def test_answerAllowWithIdentity(self):
        self.request.identity = IDENTIFIER_SELECT
        selected_id = 'http://anon.unittest/9861'
        answer = self.request.answer(True, identity=selected_id)
        self._expectAnswer(answer, selected_id)

    def test_answerAllowWithDelegatedIdentityOpenID2(self):
        """Answer an IDENTIFIER_SELECT case with a delegated identifier.
        """
        # claimed_id delegates to selected_id here.
        self.request.identity = IDENTIFIER_SELECT
        selected_id = 'http://anon.unittest/9861'
        claimed_id = 'http://monkeyhat.unittest/'
        answer = self.request.answer(True, identity=selected_id,
                                     claimed_id=claimed_id)
        self._expectAnswer(answer, selected_id, claimed_id)

    def test_answerAllowWithDelegatedIdentityOpenID1(self):
        """claimed_id parameter doesn't exist in OpenID 1.
        """
        self.request.message = Message(OPENID1_NS)
        # claimed_id delegates to selected_id here.
        self.request.identity = IDENTIFIER_SELECT
        selected_id = 'http://anon.unittest/9861'
        claimed_id = 'http://monkeyhat.unittest/'
        self.assertRaises(server.VersionError,
                          self.request.answer, True,
                          identity=selected_id,
                          claimed_id=claimed_id)

    def test_answerAllowWithAnotherIdentity(self):
        # XXX - Check on this, I think this behavior is legal in OpenID 2.0?
        self.assertRaises(ValueError, self.request.answer, True,
                          identity="http://pebbles.unittest/")

    def test_answerAllowWithIdentityNormalization(self):
        # The RP has sent us a non-normalized value for openid.identity,
        # and the library user is passing an explicit value for identity
        # to CheckIDRequest.answer.
        non_normalized = 'http://bambam.unittest'
        normalized = non_normalized + '/'

        self.request.identity = non_normalized
        self.request.claimed_id = non_normalized
        answer = self.request.answer(True, identity=normalized)

        # Expect the values that were sent in the request, even though
        # they're not normalized.
        self._expectAnswer(answer, identity=non_normalized,
                           claimed_id=non_normalized)

    def test_answerAllowNoIdentityOpenID1(self):
        self.request.message = Message(OPENID1_NS)
        self.request.identity = None
        self.assertRaises(ValueError, self.request.answer, True,
                          identity=None)

    def test_answerAllowForgotEndpoint(self):
        self.request.op_endpoint = None
        self.assertRaises(RuntimeError, self.request.answer, True)

    def test_checkIDWithNoIdentityOpenID1(self):
        msg = Message(OPENID1_NS)
        msg.setArg(OPENID_NS, 'return_to', 'bogus')
        msg.setArg(OPENID_NS, 'trust_root', 'bogus')
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'assoc_handle', 'bogus')

        self.assertRaises(server.ProtocolError,
                          server.CheckIDRequest.fromMessage,
                          msg, self.server)

    def test_fromMessageClaimedIDWithoutIdentityOpenID2(self):
        name = 'https://example.myopenid.com'

        msg = Message(OPENID2_NS)
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'return_to', 'http://invalid:8000/rt')
        msg.setArg(OPENID_NS, 'claimed_id', name)

        self.assertRaises(server.ProtocolError,
                          server.CheckIDRequest.fromMessage,
                          msg, self.server)

    def test_fromMessageIdentityWithoutClaimedIDOpenID2(self):
        name = 'https://example.myopenid.com'

        msg = Message(OPENID2_NS)
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'return_to', 'http://invalid:8000/rt')
        msg.setArg(OPENID_NS, 'identity', name)

        self.assertRaises(server.ProtocolError,
                          server.CheckIDRequest.fromMessage,
                          msg, self.server)

    def test_trustRootOpenID1(self):
        """Ignore openid.realm in OpenID 1"""
        msg = Message(OPENID1_NS)
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'trust_root', 'http://real_trust_root/')
        msg.setArg(OPENID_NS, 'realm', 'http://fake_trust_root/')
        msg.setArg(OPENID_NS, 'return_to', 'http://real_trust_root/foo')
        msg.setArg(OPENID_NS, 'assoc_handle', 'bogus')
        msg.setArg(OPENID_NS, 'identity', 'george')

        result = server.CheckIDRequest.fromMessage(msg,
                                                   self.server.op_endpoint)

        self.assertEqual(result.trust_root, 'http://real_trust_root/')

    def test_trustRootOpenID2(self):
        """Ignore openid.trust_root in OpenID 2"""
        msg = Message(OPENID2_NS)
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'realm', 'http://real_trust_root/')
        msg.setArg(OPENID_NS, 'trust_root', 'http://fake_trust_root/')
        msg.setArg(OPENID_NS, 'return_to', 'http://real_trust_root/foo')
        msg.setArg(OPENID_NS, 'assoc_handle', 'bogus')
        msg.setArg(OPENID_NS, 'identity', 'george')
        msg.setArg(OPENID_NS, 'claimed_id', 'george')

        result = server.CheckIDRequest.fromMessage(msg,
                                                   self.server.op_endpoint)

        self.assertEqual(result.trust_root, 'http://real_trust_root/')

    def test_answerAllowNoTrustRoot(self):
        self.request.trust_root = None
        answer = self.request.answer(True)
        self.assertEqual(answer.request, self.request)
        self._expectAnswer(answer, self.request.identity)

    def test_fromMessageWithoutTrustRoot(self):
        msg = Message(OPENID2_NS)
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'return_to', 'http://real_trust_root/foo')
        msg.setArg(OPENID_NS, 'assoc_handle', 'bogus')
        msg.setArg(OPENID_NS, 'identity', 'george')
        msg.setArg(OPENID_NS, 'claimed_id', 'george')

        result = server.CheckIDRequest.fromMessage(msg,
                                                   self.server.op_endpoint)

        # With no realm, the trust root defaults to the return_to URL.
        self.assertEqual(result.trust_root, 'http://real_trust_root/foo')

    def test_fromMessageWithEmptyTrustRoot(self):
        return_to = u'http://someplace.invalid/?go=thing'
        msg = Message.fromPostArgs({
            u'openid.assoc_handle': u'{blah}{blah}{OZivdQ==}',
            u'openid.claimed_id': u'http://delegated.invalid/',
            u'openid.identity': u'http://op-local.example.com/',
            u'openid.mode': u'checkid_setup',
            u'openid.ns': u'http://openid.net/signon/1.0',
            u'openid.return_to': return_to,
            u'openid.trust_root': u''})

        result = server.CheckIDRequest.fromMessage(msg,
                                                   self.server.op_endpoint)

        # An empty trust_root also falls back to the return_to URL.
        self.assertEqual(result.trust_root, return_to)

    def test_fromMessageWithoutTrustRootOrReturnTo(self):
        msg = Message(OPENID2_NS)
        msg.setArg(OPENID_NS, 'mode', 'checkid_setup')
        msg.setArg(OPENID_NS, 'assoc_handle', 'bogus')
        msg.setArg(OPENID_NS, 'identity', 'george')
        msg.setArg(OPENID_NS, 'claimed_id', 'george')

        self.assertRaises(server.ProtocolError,
                          server.CheckIDRequest.fromMessage,
                          msg, self.server.op_endpoint)

    def test_answerAllowNoEndpointOpenID1(self):
        """Test .allow() with an OpenID 1.x Message on a CheckIDRequest
        built without an op_endpoint parameter.
        """
        identity = 'http://bambam.unittest/'
        reqmessage = Message.fromOpenIDArgs({
            'identity': identity,
            'trust_root': 'http://bar.unittest/',
            'return_to': 'http://bar.unittest/999',
            })
        self.request = server.CheckIDRequest.fromMessage(reqmessage, None)
        answer = self.request.answer(True)

        expected_list = [
            ('mode', 'id_res'),
            ('return_to', self.request.return_to),
            ('identity', identity),
            ]

        for k, expected in expected_list:
            actual = answer.fields.getArg(OPENID_NS, k)
            self.assertEqual(
                expected, actual,
                "%s: expected %s, got %s" % (k, expected, actual))

        self.assertTrue(answer.fields.hasKey(OPENID_NS, 'response_nonce'))
        self.assertEqual(answer.fields.getOpenIDNamespace(), OPENID1_NS)
        self.assertTrue(answer.fields.namespaces.isImplicit(OPENID1_NS))

        # One for nonce (OpenID v1 namespace is implicit)
        self.assertEqual(len(answer.fields.toPostArgs()),
                         len(expected_list) + 1,
                         answer.fields.toPostArgs())

    def test_answerImmediateDenyOpenID2(self):
        """Look for mode=setup_needed in checkid_immediate negative
        response in OpenID 2 case.

        See specification Responding to Authentication Requests /
        Negative Assertions / In Response to Immediate Requests.
        """
        self.request.mode = 'checkid_immediate'
        self.request.immediate = True
        self.request.claimed_id = 'http://claimed-id.test/'
        server_url = "http://setup-url.unittest/"
        # crappiting setup_url, you dirty my interface with your presence!
        answer = self.request.answer(False, server_url=server_url)
        self.assertEqual(answer.request, self.request)
        self.assertEqual(len(answer.fields.toPostArgs()), 3, answer.fields)
        self.assertEqual(answer.fields.getOpenIDNamespace(), OPENID2_NS)
        self.assertEqual(answer.fields.getArg(OPENID_NS, 'mode'),
                         'setup_needed')

        usu = answer.fields.getArg(OPENID_NS, 'user_setup_url')
        expected_substr = 'openid.claimed_id=http%3A%2F%2Fclaimed-id.test%2F'
        self.assertTrue(expected_substr in usu, usu)

    def test_answerImmediateDenyOpenID1(self):
        """Look for user_setup_url in checkid_immediate negative
        response in OpenID 1 case."""
        self.request.message = Message(OPENID1_NS)
        self.request.mode = 'checkid_immediate'
        self.request.immediate = True
        server_url = "http://setup-url.unittest/"
        # crappiting setup_url, you dirty my interface with your presence!
        answer = self.request.answer(False, server_url=server_url)
        self.assertEqual(answer.request, self.request)
        self.assertEqual(len(answer.fields.toPostArgs()), 2, answer.fields)
        self.assertEqual(answer.fields.getOpenIDNamespace(), OPENID1_NS)
        self.assertTrue(answer.fields.namespaces.isImplicit(OPENID1_NS))
        self.assertEqual(answer.fields.getArg(OPENID_NS, 'mode'), 'id_res')
        self.assertTrue(answer.fields.getArg(
            OPENID_NS, 'user_setup_url', '').startswith(server_url))

    def test_answerSetupDeny(self):
        answer = self.request.answer(False)
        self.assertEqual(answer.fields.getArgs(OPENID_NS), {
            'mode': 'cancel',
            })

    def test_encodeToURL(self):
        server_url = 'http://openid-server.unittest/'
        result = self.request.encodeToURL(server_url)

        # How to check?  How about a round-trip test.
        base, result_args = result.split('?', 1)
        result_args = dict(cgi.parse_qsl(result_args))
        message = Message.fromPostArgs(result_args)
        rebuilt_request = server.CheckIDRequest.fromMessage(message,
                                                            self.server.op_endpoint)
        # argh, lousy hack
        self.request.message = message
        self.assertEqual(rebuilt_request.__dict__, self.request.__dict__)

    def test_getCancelURL(self):
        url = self.request.getCancelURL()
        rt, query_string = url.split('?')
        self.assertEqual(self.request.return_to, rt)
        query = dict(cgi.parse_qsl(query_string))
        self.assertEqual(query, {'openid.mode':'cancel',
                                 'openid.ns':OPENID2_NS})

    def test_getCancelURLimmed(self):
        self.request.mode = 'checkid_immediate'
        self.request.immediate = True
        self.assertRaises(ValueError, self.request.getCancelURL)
class TestCheckIDExtension(unittest.TestCase):
    """Extension arguments added to a response live in their own
    namespace and leave the OpenID-namespace arguments untouched.
    (Assertions modernized from the deprecated failUnless* aliases.)
    """

    def setUp(self):
        self.op_endpoint = 'http://endpoint.unittest/ext'
        self.store = memstore.MemoryStore()
        self.server = server.Server(self.store, self.op_endpoint)
        self.request = server.CheckIDRequest(
            identity = 'http://bambam.unittest/',
            trust_root = 'http://bar.unittest/',
            return_to = 'http://bar.unittest/999',
            immediate = False,
            op_endpoint = self.server.op_endpoint,
            )
        self.request.message = Message(OPENID2_NS)
        self.response = server.OpenIDResponse(self.request)
        self.response.fields.setArg(OPENID_NS, 'mode', 'id_res')
        self.response.fields.setArg(OPENID_NS, 'blue', 'star')

    def test_addField(self):
        namespace = 'something:'
        self.response.fields.setArg(namespace, 'bright', 'potato')
        self.assertEqual(self.response.fields.getArgs(OPENID_NS),
                         {'blue': 'star',
                          'mode': 'id_res',
                          })

        self.assertEqual(self.response.fields.getArgs(namespace),
                         {'bright':'potato'})

    def test_addFields(self):
        namespace = 'mi5:'
        args =  {'tangy': 'suspenders',
                 'bravo': 'inclusion'}
        self.response.fields.updateArgs(namespace, args)
        self.assertEqual(self.response.fields.getArgs(OPENID_NS),
                         {'blue': 'star',
                          'mode': 'id_res',
                          })
        self.assertEqual(self.response.fields.getArgs(namespace), args)
class MockSignatory(object):
    """Stand-in for server.Signatory that knows one (dumb, handle) pair.

    Tests may flip ``isValid`` to simulate a signature-verification failure.
    """
    isValid = True

    def __init__(self, assoc):
        self.assocs = [assoc]

    def verify(self, assoc_handle, message):
        # Verification only makes sense for a message carrying a signature.
        assert message.hasKey(OPENID_NS, "sig")
        if (True, assoc_handle) not in self.assocs:
            return False
        return self.isValid

    def getAssociation(self, assoc_handle, dumb):
        # Not a faithful Association object -- just enough truthiness
        # for the code under test.
        known = (dumb, assoc_handle) in self.assocs
        return True if known else None

    def invalidate(self, assoc_handle, dumb):
        key = (dumb, assoc_handle)
        if key in self.assocs:
            self.assocs.remove(key)
class TestCheckAuth(unittest.TestCase):
    """Exercise CheckAuthRequest.answer() against a MockSignatory."""
    def setUp(self):
        self.assoc_handle = 'mooooooooo'
        self.message = Message.fromPostArgs({
            'openid.sig': 'signarture',
            'one': 'alpha',
            'two': 'beta',
            })
        self.request = server.CheckAuthRequest(
            self.assoc_handle, self.message)
        self.signatory = MockSignatory((True, self.assoc_handle))
    def test_valid(self):
        """A valid signature yields is_valid:true."""
        r = self.request.answer(self.signatory)
        self.failUnlessEqual(r.fields.getArgs(OPENID_NS), {'is_valid': 'true'})
        self.failUnlessEqual(r.request, self.request)
    def test_invalid(self):
        """A failed signature check yields is_valid:false."""
        self.signatory.isValid = False
        r = self.request.answer(self.signatory)
        self.failUnlessEqual(r.fields.getArgs(OPENID_NS),
                             {'is_valid': 'false'})
    def test_replay(self):
        """Don't validate the same response twice.
        From "Checking the Nonce"::
            When using "check_authentication", the OP MUST ensure that an
            assertion has not yet been accepted with the same value for
            "openid.response_nonce".
        In this implementation, the assoc_handle is only valid once. And
        nonces are a signed component of the message, so they can't be used
        with another handle without breaking the sig.
        """
        r = self.request.answer(self.signatory)
        # Second answer: the handle was consumed by the first call.
        r = self.request.answer(self.signatory)
        self.failUnlessEqual(r.fields.getArgs(OPENID_NS),
                             {'is_valid': 'false'})
    def test_invalidatehandle(self):
        """An unrecognized invalidate_handle is echoed back to the RP."""
        self.request.invalidate_handle = "bogusHandle"
        r = self.request.answer(self.signatory)
        self.failUnlessEqual(r.fields.getArgs(OPENID_NS),
                             {'is_valid': 'true',
                              'invalidate_handle': "bogusHandle"})
        self.failUnlessEqual(r.request, self.request)
    def test_invalidatehandleNo(self):
        """A still-valid shared handle must NOT be echoed as invalidated."""
        assoc_handle = 'goodhandle'
        self.signatory.assocs.append((False, 'goodhandle'))
        self.request.invalidate_handle = assoc_handle
        r = self.request.answer(self.signatory)
        self.failUnlessEqual(r.fields.getArgs(OPENID_NS), {'is_valid': 'true'})
class TestAssociate(unittest.TestCase):
    """Tests for AssociateRequest: DH sessions, plaintext sessions, and
    protocol-error responses."""
    # TODO: test DH with non-default values for modulus and gen.
    # (important to do because we actually had it broken for a while.)
    def setUp(self):
        self.request = server.AssociateRequest.fromMessage(
            Message.fromPostArgs({}))
        self.store = memstore.MemoryStore()
        self.signatory = server.Signatory(self.store)
    def test_dhSHA1(self):
        """DH-SHA1 encrypts the MAC key; the consumer's recovered secret
        must equal the association secret."""
        self.assoc = self.signatory.createAssociation(dumb=False, assoc_type='HMAC-SHA1')
        from openid.dh import DiffieHellman
        from openid.server.server import DiffieHellmanSHA1ServerSession
        consumer_dh = DiffieHellman.fromDefaults()
        cpub = consumer_dh.public
        server_dh = DiffieHellman.fromDefaults()
        session = DiffieHellmanSHA1ServerSession(server_dh, cpub)
        self.request = server.AssociateRequest(session, 'HMAC-SHA1')
        response = self.request.answer(self.assoc)
        rfg = lambda f: response.fields.getArg(OPENID_NS, f)
        self.failUnlessEqual(rfg("assoc_type"), "HMAC-SHA1")
        self.failUnlessEqual(rfg("assoc_handle"), self.assoc.handle)
        # The MAC key must never travel in the clear for a DH session.
        self.failIf(rfg("mac_key"))
        self.failUnlessEqual(rfg("session_type"), "DH-SHA1")
        self.failUnless(rfg("enc_mac_key"))
        self.failUnless(rfg("dh_server_public"))
        # Python 2 only: str.decode('base64').
        enc_key = rfg("enc_mac_key").decode('base64')
        spub = cryptutil.base64ToLong(rfg("dh_server_public"))
        secret = consumer_dh.xorSecret(spub, enc_key, cryptutil.sha1)
        self.failUnlessEqual(secret, self.assoc.secret)
    # SHA256 support depends on the runtime; define the test only when
    # available so the suite degrades gracefully.
    if not cryptutil.SHA256_AVAILABLE:
        warnings.warn("Not running SHA256 tests.")
    else:
        def test_dhSHA256(self):
            """SHA256 analogue of test_dhSHA1."""
            self.assoc = self.signatory.createAssociation(
                dumb=False, assoc_type='HMAC-SHA256')
            from openid.dh import DiffieHellman
            from openid.server.server import DiffieHellmanSHA256ServerSession
            consumer_dh = DiffieHellman.fromDefaults()
            cpub = consumer_dh.public
            server_dh = DiffieHellman.fromDefaults()
            session = DiffieHellmanSHA256ServerSession(server_dh, cpub)
            self.request = server.AssociateRequest(session, 'HMAC-SHA256')
            response = self.request.answer(self.assoc)
            rfg = lambda f: response.fields.getArg(OPENID_NS, f)
            self.failUnlessEqual(rfg("assoc_type"), "HMAC-SHA256")
            self.failUnlessEqual(rfg("assoc_handle"), self.assoc.handle)
            self.failIf(rfg("mac_key"))
            self.failUnlessEqual(rfg("session_type"), "DH-SHA256")
            self.failUnless(rfg("enc_mac_key"))
            self.failUnless(rfg("dh_server_public"))
            enc_key = rfg("enc_mac_key").decode('base64')
            spub = cryptutil.base64ToLong(rfg("dh_server_public"))
            secret = consumer_dh.xorSecret(spub, enc_key, cryptutil.sha256)
            self.failUnlessEqual(secret, self.assoc.secret)
    def test_protoError256(self):
        """Mismatched or bogus assoc types combined with a DH-SHA256
        session must raise ProtocolError."""
        from openid.consumer.consumer import \
             DiffieHellmanSHA256ConsumerSession
        s256_session = DiffieHellmanSHA256ConsumerSession()
        invalid_s256 = {'openid.assoc_type':'HMAC-SHA1',
                        'openid.session_type':'DH-SHA256',}
        invalid_s256.update(s256_session.getRequest())
        invalid_s256_2 = {'openid.assoc_type':'MONKEY-PIRATE',
                          'openid.session_type':'DH-SHA256',}
        invalid_s256_2.update(s256_session.getRequest())
        bad_request_argss = [
            invalid_s256,
            invalid_s256_2,
            ]
        for request_args in bad_request_argss:
            message = Message.fromPostArgs(request_args)
            self.failUnlessRaises(server.ProtocolError,
                                  server.AssociateRequest.fromMessage,
                                  message)
    def test_protoError(self):
        """SHA1 analogue of test_protoError256, plus a nonsense assoc_type."""
        from openid.consumer.consumer import DiffieHellmanSHA1ConsumerSession
        s1_session = DiffieHellmanSHA1ConsumerSession()
        invalid_s1 = {'openid.assoc_type':'HMAC-SHA256',
                      'openid.session_type':'DH-SHA1',}
        invalid_s1.update(s1_session.getRequest())
        invalid_s1_2 = {'openid.assoc_type':'ROBOT-NINJA',
                      'openid.session_type':'DH-SHA1',}
        invalid_s1_2.update(s1_session.getRequest())
        bad_request_argss = [
            {'openid.assoc_type':'Wha?'},
            invalid_s1,
            invalid_s1_2,
            ]
        for request_args in bad_request_argss:
            message = Message.fromPostArgs(request_args)
            self.failUnlessRaises(server.ProtocolError,
                                  server.AssociateRequest.fromMessage,
                                  message)
    def test_protoErrorFields(self):
        """ProtocolError replies carry the optional contact/reference
        fields in both protocol versions."""
        contact = 'user@example.invalid'
        reference = 'Trac ticket number MAX_INT'
        error = 'poltergeist'
        openid1_args = {
            'openid.identitiy': 'invalid',
            'openid.mode': 'checkid_setup',
        }
        openid2_args = dict(openid1_args)
        openid2_args.update({'openid.ns': OPENID2_NS})
        # Check presence of optional fields in both protocol versions
        openid1_msg = Message.fromPostArgs(openid1_args)
        p = server.ProtocolError(openid1_msg, error,
                                 contact=contact, reference=reference)
        reply = p.toMessage()
        self.failUnlessEqual(reply.getArg(OPENID_NS, 'reference'), reference)
        self.failUnlessEqual(reply.getArg(OPENID_NS, 'contact'), contact)
        openid2_msg = Message.fromPostArgs(openid2_args)
        p = server.ProtocolError(openid2_msg, error,
                                 contact=contact, reference=reference)
        reply = p.toMessage()
        self.failUnlessEqual(reply.getArg(OPENID_NS, 'reference'), reference)
        self.failUnlessEqual(reply.getArg(OPENID_NS, 'contact'), contact)
    def failUnlessExpiresInMatches(self, msg, expected_expires_in):
        """Assert that the message's expires_in is within a one-second
        slop of the expected lifetime."""
        expires_in_str = msg.getArg(OPENID_NS, 'expires_in', no_default)
        expires_in = int(expires_in_str)
        # Slop is necessary because the tests can sometimes get run
        # right on a second boundary
        slop = 1 # second
        difference = expected_expires_in - expires_in
        error_message = ('"expires_in" value not within %s of expected: '
                         'expected=%s, actual=%s' %
                         (slop, expected_expires_in, expires_in))
        self.failUnless(0 <= difference <= slop, error_message)
    def test_plaintext(self):
        """Plaintext SHA1 association: MAC key travels base64'd in the
        clear, no session fields."""
        self.assoc = self.signatory.createAssociation(dumb=False, assoc_type='HMAC-SHA1')
        response = self.request.answer(self.assoc)
        rfg = lambda f: response.fields.getArg(OPENID_NS, f)
        self.failUnlessEqual(rfg("assoc_type"), "HMAC-SHA1")
        self.failUnlessEqual(rfg("assoc_handle"), self.assoc.handle)
        self.failUnlessExpiresInMatches(
            response.fields, self.signatory.SECRET_LIFETIME)
        self.failUnlessEqual(
            rfg("mac_key"), oidutil.toBase64(self.assoc.secret))
        self.failIf(rfg("session_type"))
        self.failIf(rfg("enc_mac_key"))
        self.failIf(rfg("dh_server_public"))
    def test_plaintext_v2(self):
        # The main difference between this and the v1 test is that
        # session_type is always returned in v2.
        args = {
            'openid.ns': OPENID2_NS,
            'openid.mode': 'associate',
            'openid.assoc_type': 'HMAC-SHA1',
            'openid.session_type': 'no-encryption',
            }
        self.request = server.AssociateRequest.fromMessage(
            Message.fromPostArgs(args))
        self.failIf(self.request.message.isOpenID1())
        self.assoc = self.signatory.createAssociation(
            dumb=False, assoc_type='HMAC-SHA1')
        response = self.request.answer(self.assoc)
        rfg = lambda f: response.fields.getArg(OPENID_NS, f)
        self.failUnlessEqual(rfg("assoc_type"), "HMAC-SHA1")
        self.failUnlessEqual(rfg("assoc_handle"), self.assoc.handle)
        self.failUnlessExpiresInMatches(
            response.fields, self.signatory.SECRET_LIFETIME)
        self.failUnlessEqual(
            rfg("mac_key"), oidutil.toBase64(self.assoc.secret))
        self.failUnlessEqual(rfg("session_type"), "no-encryption")
        self.failIf(rfg("enc_mac_key"))
        self.failIf(rfg("dh_server_public"))
    def test_plaintext256(self):
        self.assoc = self.signatory.createAssociation(dumb=False, assoc_type='HMAC-SHA256')
        response = self.request.answer(self.assoc)
        rfg = lambda f: response.fields.getArg(OPENID_NS, f)
        # NOTE(review): the association created above is HMAC-SHA256, yet
        # this asserts "HMAC-SHA1" -- looks like a copy/paste from
        # test_plaintext; confirm the intended expectation upstream.
        self.failUnlessEqual(rfg("assoc_type"), "HMAC-SHA1")
        self.failUnlessEqual(rfg("assoc_handle"), self.assoc.handle)
        self.failUnlessExpiresInMatches(
            response.fields, self.signatory.SECRET_LIFETIME)
        self.failUnlessEqual(
            rfg("mac_key"), oidutil.toBase64(self.assoc.secret))
        self.failIf(rfg("session_type"))
        self.failIf(rfg("enc_mac_key"))
        self.failIf(rfg("dh_server_public"))
    def test_unsupportedPrefer(self):
        """answerUnsupported advertises the server's preferred types."""
        allowed_assoc = 'COLD-PET-RAT'
        allowed_sess = 'FROG-BONES'
        message = 'This is a unit test'
        # Set an OpenID 2 message so answerUnsupported doesn't raise
        # ProtocolError.
        self.request.message = Message(OPENID2_NS)
        response = self.request.answerUnsupported(
            message=message,
            preferred_session_type=allowed_sess,
            preferred_association_type=allowed_assoc,
            )
        rfg = lambda f: response.fields.getArg(OPENID_NS, f)
        self.failUnlessEqual(rfg('error_code'), 'unsupported-type')
        self.failUnlessEqual(rfg('assoc_type'), allowed_assoc)
        self.failUnlessEqual(rfg('error'), message)
        self.failUnlessEqual(rfg('session_type'), allowed_sess)
    def test_unsupported(self):
        """Without preferences, answerUnsupported omits fallback types."""
        message = 'This is a unit test'
        # Set an OpenID 2 message so answerUnsupported doesn't raise
        # ProtocolError.
        self.request.message = Message(OPENID2_NS)
        response = self.request.answerUnsupported(message)
        rfg = lambda f: response.fields.getArg(OPENID_NS, f)
        self.failUnlessEqual(rfg('error_code'), 'unsupported-type')
        self.failUnlessEqual(rfg('assoc_type'), None)
        self.failUnlessEqual(rfg('error'), message)
        self.failUnlessEqual(rfg('session_type'), None)
class Counter(object):
    """Minimal call counter used to observe handler dispatch."""

    def __init__(self):
        self.count = 0

    def inc(self):
        """Record one more call."""
        self.count = self.count + 1
class TestServer(unittest.TestCase, CatchLogs):
    """End-to-end tests of server.Server request dispatch and the
    openid_associate / openid_check_authentication handlers."""
    def setUp(self):
        self.store = memstore.MemoryStore()
        self.server = server.Server(self.store, "http://server.unittest/endpt")
        CatchLogs.setUp(self)
    def test_dispatch(self):
        """handleRequest routes to the openid_<mode> method on the server."""
        monkeycalled = Counter()
        def monkeyDo(request):
            monkeycalled.inc()
            r = server.OpenIDResponse(request)
            return r
        self.server.openid_monkeymode = monkeyDo
        request = server.OpenIDRequest()
        request.mode = "monkeymode"
        request.namespace = OPENID1_NS
        webresult = self.server.handleRequest(request)
        self.failUnlessEqual(monkeycalled.count, 1)
    def test_associate(self):
        """A default associate request is answered with an assoc_handle."""
        request = server.AssociateRequest.fromMessage(Message.fromPostArgs({}))
        response = self.server.openid_associate(request)
        self.failUnless(response.fields.hasKey(OPENID_NS, "assoc_handle"),
                        "No assoc_handle here: %s" % (response.fields,))
    def test_associate2(self):
        """Associate when the server has no allowed association types
        Gives back an error with error_code and no fallback session or
        assoc types."""
        self.server.negotiator.setAllowedTypes([])
        # Set an OpenID 2 message so answerUnsupported doesn't raise
        # ProtocolError.
        msg = Message.fromPostArgs({
            'openid.ns': OPENID2_NS,
            'openid.session_type': 'no-encryption',
            })
        request = server.AssociateRequest.fromMessage(msg)
        response = self.server.openid_associate(request)
        self.failUnless(response.fields.hasKey(OPENID_NS, "error"))
        self.failUnless(response.fields.hasKey(OPENID_NS, "error_code"))
        self.failIf(response.fields.hasKey(OPENID_NS, "assoc_handle"))
        self.failIf(response.fields.hasKey(OPENID_NS, "assoc_type"))
        self.failIf(response.fields.hasKey(OPENID_NS, "session_type"))
    def test_associate3(self):
        """Request an assoc type that is not supported when there are
        supported types.
        Should give back an error message with a fallback type.
        """
        self.server.negotiator.setAllowedTypes([('HMAC-SHA256', 'DH-SHA256')])
        msg = Message.fromPostArgs({
            'openid.ns': OPENID2_NS,
            'openid.session_type': 'no-encryption',
            })
        request = server.AssociateRequest.fromMessage(msg)
        response = self.server.openid_associate(request)
        self.failUnless(response.fields.hasKey(OPENID_NS, "error"))
        self.failUnless(response.fields.hasKey(OPENID_NS, "error_code"))
        self.failIf(response.fields.hasKey(OPENID_NS, "assoc_handle"))
        self.failUnlessEqual(response.fields.getArg(OPENID_NS, "assoc_type"),
                             'HMAC-SHA256')
        self.failUnlessEqual(response.fields.getArg(OPENID_NS, "session_type"),
                             'DH-SHA256')
    # SHA256 support depends on the runtime; define the test only when
    # available so the suite degrades gracefully.
    if not cryptutil.SHA256_AVAILABLE:
        warnings.warn("Not running SHA256 tests.")
    else:
        def test_associate4(self):
            """DH-SHA256 association session"""
            self.server.negotiator.setAllowedTypes(
                [('HMAC-SHA256', 'DH-SHA256')])
            query = {
                'openid.dh_consumer_public':
                'ALZgnx8N5Lgd7pCj8K86T/DDMFjJXSss1SKoLmxE72kJTzOtG6I2PaYrHX'
                'xku4jMQWSsGfLJxwCZ6280uYjUST/9NWmuAfcrBfmDHIBc3H8xh6RBnlXJ'
                '1WxJY3jHd5k1/ZReyRZOxZTKdF/dnIqwF8ZXUwI6peV0TyS/K1fOfF/s',
                'openid.assoc_type': 'HMAC-SHA256',
                'openid.session_type': 'DH-SHA256',
                }
            message = Message.fromPostArgs(query)
            request = server.AssociateRequest.fromMessage(message)
            response = self.server.openid_associate(request)
            self.failUnless(response.fields.hasKey(OPENID_NS, "assoc_handle"))
    def test_missingSessionTypeOpenID2(self):
        """Make sure session_type is required in OpenID 2"""
        msg = Message.fromPostArgs({
            'openid.ns': OPENID2_NS,
            })
        self.assertRaises(server.ProtocolError,
                          server.AssociateRequest.fromMessage, msg)
    def test_checkAuth(self):
        """check_authentication always reports an is_valid field."""
        request = server.CheckAuthRequest('arrrrrf', '0x3999', [])
        response = self.server.openid_check_authentication(request)
        self.failUnless(response.fields.hasKey(OPENID_NS, "is_valid"))
class TestSignatory(unittest.TestCase, CatchLogs):
    """Tests for server.Signatory: signing responses, verifying
    signatures, and keeping the dumb/normal association namespaces
    separate."""
    def setUp(self):
        self.store = memstore.MemoryStore()
        self.signatory = server.Signatory(self.store)
        self._dumb_key = self.signatory._dumb_key
        self._normal_key = self.signatory._normal_key
        CatchLogs.setUp(self)
    def test_sign(self):
        """Signing with a known shared handle signs every field plus
        assoc_handle and the 'signed' list itself."""
        request = server.OpenIDRequest()
        assoc_handle = '{assoc}{lookatme}'
        self.store.storeAssociation(
            self._normal_key,
            association.Association.fromExpiresIn(60, assoc_handle,
                                                  'sekrit', 'HMAC-SHA1'))
        request.assoc_handle = assoc_handle
        request.namespace = OPENID1_NS
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'foo': 'amsigned',
            'bar': 'notsigned',
            'azu': 'alsosigned',
            })
        sresponse = self.signatory.sign(response)
        self.failUnlessEqual(
            sresponse.fields.getArg(OPENID_NS, 'assoc_handle'),
            assoc_handle)
        self.failUnlessEqual(sresponse.fields.getArg(OPENID_NS, 'signed'),
                             'assoc_handle,azu,bar,foo,signed')
        self.failUnless(sresponse.fields.getArg(OPENID_NS, 'sig'))
        self.failIf(self.messages, self.messages)
    def test_signDumb(self):
        """With no shared handle, signing mints a fresh dumb-mode
        association."""
        request = server.OpenIDRequest()
        request.assoc_handle = None
        request.namespace = OPENID2_NS
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'foo': 'amsigned',
            'bar': 'notsigned',
            'azu': 'alsosigned',
            'ns':OPENID2_NS,
            })
        sresponse = self.signatory.sign(response)
        assoc_handle = sresponse.fields.getArg(OPENID_NS, 'assoc_handle')
        self.failUnless(assoc_handle)
        assoc = self.signatory.getAssociation(assoc_handle, dumb=True)
        self.failUnless(assoc)
        self.failUnlessEqual(sresponse.fields.getArg(OPENID_NS, 'signed'),
                             'assoc_handle,azu,bar,foo,ns,signed')
        self.failUnless(sresponse.fields.getArg(OPENID_NS, 'sig'))
        self.failIf(self.messages, self.messages)
    def test_signExpired(self):
        """Sign a response to a message with an expired handle (using invalidate_handle).
        From "Verifying with an Association"::
            If an authentication request included an association handle for an
            association between the OP and the Relying party, and the OP no
            longer wishes to use that handle (because it has expired or the
            secret has been compromised, for instance), the OP will send a
            response that must be verified directly with the OP, as specified
            in Section 11.3.2. In that instance, the OP will include the field
            "openid.invalidate_handle" set to the association handle that the
            Relying Party included with the original request.
        """
        request = server.OpenIDRequest()
        request.namespace = OPENID2_NS
        assoc_handle = '{assoc}{lookatme}'
        # Negative lifetime: the association is already expired when stored.
        self.store.storeAssociation(
            self._normal_key,
            association.Association.fromExpiresIn(-10, assoc_handle,
                                                  'sekrit', 'HMAC-SHA1'))
        self.failUnless(self.store.getAssociation(self._normal_key, assoc_handle))
        request.assoc_handle = assoc_handle
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'foo': 'amsigned',
            'bar': 'notsigned',
            'azu': 'alsosigned',
            })
        sresponse = self.signatory.sign(response)
        new_assoc_handle = sresponse.fields.getArg(OPENID_NS, 'assoc_handle')
        self.failUnless(new_assoc_handle)
        self.failIfEqual(new_assoc_handle, assoc_handle)
        self.failUnlessEqual(
            sresponse.fields.getArg(OPENID_NS, 'invalidate_handle'),
            assoc_handle)
        self.failUnlessEqual(sresponse.fields.getArg(OPENID_NS, 'signed'),
                             'assoc_handle,azu,bar,foo,invalidate_handle,signed')
        self.failUnless(sresponse.fields.getArg(OPENID_NS, 'sig'))
        # make sure the expired association is gone
        self.failIf(self.store.getAssociation(self._normal_key, assoc_handle),
                    "expired association is still retrievable.")
        # make sure the new key is a dumb mode association
        self.failUnless(self.store.getAssociation(self._dumb_key, new_assoc_handle))
        self.failIf(self.store.getAssociation(self._normal_key, new_assoc_handle))
        self.failUnless(self.messages)
    def test_signInvalidHandle(self):
        """An unknown handle falls back to a fresh dumb association and
        reports the bad handle via invalidate_handle."""
        request = server.OpenIDRequest()
        request.namespace = OPENID2_NS
        assoc_handle = '{bogus-assoc}{notvalid}'
        request.assoc_handle = assoc_handle
        response = server.OpenIDResponse(request)
        response.fields = Message.fromOpenIDArgs({
            'foo': 'amsigned',
            'bar': 'notsigned',
            'azu': 'alsosigned',
            })
        sresponse = self.signatory.sign(response)
        new_assoc_handle = sresponse.fields.getArg(OPENID_NS, 'assoc_handle')
        self.failUnless(new_assoc_handle)
        self.failIfEqual(new_assoc_handle, assoc_handle)
        self.failUnlessEqual(
            sresponse.fields.getArg(OPENID_NS, 'invalidate_handle'),
            assoc_handle)
        self.failUnlessEqual(
            sresponse.fields.getArg(OPENID_NS, 'signed'), 'assoc_handle,azu,bar,foo,invalidate_handle,signed')
        self.failUnless(sresponse.fields.getArg(OPENID_NS, 'sig'))
        # make sure the new key is a dumb mode association
        self.failUnless(self.store.getAssociation(self._dumb_key, new_assoc_handle))
        self.failIf(self.store.getAssociation(self._normal_key, new_assoc_handle))
        self.failIf(self.messages, self.messages)
    def test_verify(self):
        """A correctly signed message verifies against its dumb handle."""
        assoc_handle = '{vroom}{zoom}'
        assoc = association.Association.fromExpiresIn(
            60, assoc_handle, 'sekrit', 'HMAC-SHA1')
        self.store.storeAssociation(self._dumb_key, assoc)
        signed = Message.fromPostArgs({
            'openid.foo': 'bar',
            'openid.apple': 'orange',
            'openid.assoc_handle': assoc_handle,
            'openid.signed': 'apple,assoc_handle,foo,signed',
            'openid.sig': 'uXoT1qm62/BB09Xbj98TQ8mlBco=',
            })
        verified = self.signatory.verify(assoc_handle, signed)
        self.failIf(self.messages, self.messages)
        self.failUnless(verified)
    def test_verifyBadSig(self):
        """A corrupted signature must fail verification without logging."""
        assoc_handle = '{vroom}{zoom}'
        assoc = association.Association.fromExpiresIn(
            60, assoc_handle, 'sekrit', 'HMAC-SHA1')
        self.store.storeAssociation(self._dumb_key, assoc)
        signed = Message.fromPostArgs({
            'openid.foo': 'bar',
            'openid.apple': 'orange',
            'openid.assoc_handle': assoc_handle,
            'openid.signed': 'apple,assoc_handle,foo,signed',
            # Python 2 only: str.encode('rot13') scrambles the good sig.
            'openid.sig': 'uXoT1qm62/BB09Xbj98TQ8mlBco='.encode('rot13'),
            })
        verified = self.signatory.verify(assoc_handle, signed)
        self.failIf(self.messages, self.messages)
        self.failIf(verified)
    def test_verifyBadHandle(self):
        """Verification with an unknown handle fails and logs a message."""
        assoc_handle = '{vroom}{zoom}'
        signed = Message.fromPostArgs({
            'foo': 'bar',
            'apple': 'orange',
            'openid.sig': "Ylu0KcIR7PvNegB/K41KpnRgJl0=",
            })
        verified = self.signatory.verify(assoc_handle, signed)
        self.failIf(verified)
        self.failUnless(self.messages)
    def test_verifyAssocMismatch(self):
        """Attempt to validate sign-all message with a signed-list assoc."""
        assoc_handle = '{vroom}{zoom}'
        assoc = association.Association.fromExpiresIn(
            60, assoc_handle, 'sekrit', 'HMAC-SHA1')
        self.store.storeAssociation(self._dumb_key, assoc)
        signed = Message.fromPostArgs({
            'foo': 'bar',
            'apple': 'orange',
            'openid.sig': "d71xlHtqnq98DonoSgoK/nD+QRM=",
            })
        verified = self.signatory.verify(assoc_handle, signed)
        self.failIf(verified)
        self.failUnless(self.messages)
    def test_getAssoc(self):
        """A stored dumb association is retrievable by handle."""
        assoc_handle = self.makeAssoc(dumb=True)
        assoc = self.signatory.getAssociation(assoc_handle, True)
        self.failUnless(assoc)
        self.failUnlessEqual(assoc.handle, assoc_handle)
        self.failIf(self.messages, self.messages)
    def test_getAssocExpired(self):
        """Expired associations are not returned and the expiry is logged."""
        assoc_handle = self.makeAssoc(dumb=True, lifetime=-10)
        assoc = self.signatory.getAssociation(assoc_handle, True)
        self.failIf(assoc, assoc)
        self.failUnless(self.messages)
    def test_getAssocInvalid(self):
        """An unknown handle quietly yields None."""
        ah = 'no-such-handle'
        self.failUnlessEqual(
            self.signatory.getAssociation(ah, dumb=False), None)
        self.failIf(self.messages, self.messages)
    def test_getAssocDumbVsNormal(self):
        """getAssociation(dumb=False) cannot get a dumb assoc"""
        assoc_handle = self.makeAssoc(dumb=True)
        self.failUnlessEqual(
            self.signatory.getAssociation(assoc_handle, dumb=False), None)
        self.failIf(self.messages, self.messages)
    def test_getAssocNormalVsDumb(self):
        """getAssociation(dumb=True) cannot get a shared assoc
        From "Verifying Directly with the OpenID Provider"::
            An OP MUST NOT verify signatures for associations that have shared
            MAC keys.
        """
        assoc_handle = self.makeAssoc(dumb=False)
        self.failUnlessEqual(
            self.signatory.getAssociation(assoc_handle, dumb=True), None)
        self.failIf(self.messages, self.messages)
    def test_createAssociation(self):
        """createAssociation stores a retrievable shared association."""
        assoc = self.signatory.createAssociation(dumb=False)
        self.failUnless(self.signatory.getAssociation(assoc.handle, dumb=False))
        self.failIf(self.messages, self.messages)
    def makeAssoc(self, dumb, lifetime=60):
        """Store a fresh test association in the dumb or normal namespace
        and return its handle."""
        assoc_handle = '{bling}'
        assoc = association.Association.fromExpiresIn(lifetime, assoc_handle,
                                                      'sekrit', 'HMAC-SHA1')
        self.store.storeAssociation((dumb and self._dumb_key) or self._normal_key, assoc)
        return assoc_handle
    def test_invalidate(self):
        """invalidate() removes an association that was retrievable before."""
        assoc_handle = '-squash-'
        assoc = association.Association.fromExpiresIn(60, assoc_handle,
                                                      'sekrit', 'HMAC-SHA1')
        self.store.storeAssociation(self._dumb_key, assoc)
        assoc = self.signatory.getAssociation(assoc_handle, dumb=True)
        self.failUnless(assoc)
        assoc = self.signatory.getAssociation(assoc_handle, dumb=True)
        self.failUnless(assoc)
        self.signatory.invalidate(assoc_handle, dumb=True)
        assoc = self.signatory.getAssociation(assoc_handle, dumb=True)
        self.failIf(assoc)
        self.failIf(self.messages, self.messages)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
zhester/hzpy | examples/parseriff.py | 1 | 2368 | #!/usr/bin/env python
"""
Example RIFF (WAV contents) Data Parser
Sample data is written to a CSV file for analysis.
If matplotlib and numpy are available, signal plots (DFTs) are generated.
"""
import math
import os
import struct
import wave
# Plotting support is optional: when matplotlib/numpy are missing the
# script still writes the CSV output but skips the DFT plots.
try:
    import matplotlib.pyplot as plot
    import numpy
    import numpy.fft as fft
except ImportError:
    numeric_packages = False
else:
    numeric_packages = True
#=============================================================================
def frame2mag( frame ):
    """Return the magnitude of one 2-byte I/Q sample frame.

    The frame holds two unsigned bytes (I then Q, little-endian); the
    result is sqrt(I**2 + Q**2).
    """
    ( i, q ) = struct.unpack( '<BB', frame )
    # math.hypot is the numerically robust form of sqrt(i*i + q*q).
    return math.hypot( i, q )
#=============================================================================
def main( argv ):
    """ Script execution entry point
    argv[1] is the input WAV file; optional argv[2]/argv[3] give the
    starting frame offset and the number of frames to analyze.
    Returns a process exit status (0).
    """
    # check usage
    if len( argv ) < 2:
        print 'You must specify at least an input file.'
        # NOTE(review): returns 0 (success) on a usage error -- a nonzero
        # status would be more conventional; confirm before changing.
        return 0
    # start and length (frames); defaults: read 1024 frames from the top
    start = 0
    length = 1024
    if len( argv ) > 2:
        start = int( argv[ 2 ] )
    if len( argv ) > 3:
        length = int( argv[ 3 ] )
    # open file using wave module
    wfile = wave.open( argv[ 1 ], 'rb' )
    # print file info
    print 'Channels: %d\nSample width: %d\nFrame rate: %d\nFrames: %d' % (
        wfile.getnchannels(),
        wfile.getsampwidth(),
        wfile.getframerate(),
        wfile.getnframes()
    )
    # check for starting offset: skip (and discard) the leading frames
    if start > 0:
        junk = wfile.readframes( start )
    # read frames and convert each to a magnitude
    # NOTE(review): the 2-byte stride assumes 2 bytes per frame (one I and
    # one Q byte) -- confirm for the input files in use.
    frames = wfile.readframes( length )
    samples = []
    for i in range( length ):
        index = i * 2
        samples.append( frame2mag( frames[ index : ( index + 2 ) ] ) )
    # close wave file
    wfile.close()
    # plot time-domain samples and the DFT magnitude (in dB), if available
    if numeric_packages == True:
        fft_data = fft.fft( samples[ : 1024 ] )
        mags = numpy.absolute( fft_data )
        mags_db = [ 20 * numpy.log10( mag ) for mag in mags ]
        plot.figure( 1 )
        plot.plot( samples )
        plot.figure( 2 )
        plot.plot( mags_db )
        plot.show()
    # output one magnitude per line as CSV next to the input file
    oname = argv[ 1 ].replace( '.wav', '.csv' )
    ofile = open( oname, 'wb' )
    for sample in samples:
        ofile.write( '%d\n' % sample )
    ofile.close()
    # Return success.
    return 0
#=============================================================================
# Invoke the CLI entry point when run as a script.
if __name__ == "__main__":
    import sys
    sys.exit( main( sys.argv ) )
| bsd-2-clause |
AnnaWyszomirska/lesson1_1 | tests/test_remove_contact_from_group.py | 1 | 1949 | from model.contact import Contact
from model.group import Group
import random
def test_remove_contact_from_group(app, db, orm):
    """Remove a random contact from a random group and check that the UI
    and ORM views of the group's membership stay consistent.

    Creates a group and a contact first when the database has none.
    """
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="New one"))
    if len(db.get_contact_list()) == 0:
        app.contact.add(Contact(firstname="Anna", middlename="Joanna", lastname="Wyszomirska", nickname="aneczka",
                                title="Title", company="New Company", address="My address information",
                                home="34725475263", mobile="32456234236", work="2364623645", fax="243256452",
                                email="aniawzs@wp.pl", email2="test1@gmail.com", email3="test2@gmail.com",
                                homepage="Test", birthyear="1990", annyear="2016",
                                bday="//div[@id='content']/form/select[1]//option[4]",
                                bmonth="//div[@id='content']/form/select[2]//option[3]",
                                aday="//div[@id='content']/form/select[3]//option[19]",
                                amonth="//div[@id='content']/form/select[4]//option[3]",
                                address2="My second address ", privatephone="23415257354735",
                                comments="Brak uwag"
                                ))
    group_id = app.group.random_group_id()
    contacts_in_group = app.contact.get_contacts_in_group(group_id)
    if len(contacts_in_group) == 0:
        ui_list = app.contact.get_contact_list()
        contact = random.choice(ui_list)
        app.contact.add_contact_into_group(contact.id, group_id)
        # Bug fix: re-fetch the membership; the list fetched above is still
        # empty, and random.choice() on it would raise IndexError.
        contacts_in_group = app.contact.get_contacts_in_group(group_id)
    contact = random.choice(contacts_in_group)
    app.contact.remove_contacts_from_group(contact.id, group_id)
    contact_ui = app.contact.get_contacts_in_group(group_id)
    contact_orm = orm.get_contacts_in_group(Group(id=group_id))
    assert sorted(contact_ui, key=Contact.id_or_max) == sorted(contact_orm, key=Contact.id_or_max)
Mattze96/youtube-dl | youtube_dl/extractor/nextmedia.py | 84 | 6336 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_iso8601
class NextMediaIE(InfoExtractor):
    """Extractor for hk.apple.nextmedia.com news video pages.

    Subclasses override the _fetch_* hooks to adapt to sibling sites
    that share the same page structure.
    """
    IE_DESC = '蘋果日報'
    _VALID_URL = r'http://hk.apple.nextmedia.com/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://hk.apple.nextmedia.com/realtime/news/20141108/53109199',
        'md5': 'dff9fad7009311c421176d1ac90bfe4f',
        'info_dict': {
            'id': '53109199',
            'ext': 'mp4',
            'title': '【佔領金鐘】50外國領事議員撐場 讚學生勇敢香港有希望',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'md5:28222b9912b6665a21011b034c70fcc7',
            'timestamp': 1415456273,
            'upload_date': '20141108',
        }
    }]
    # Matches the embedded player's video URL, e.g. { url: '...' }
    _URL_PATTERN = r'\{ url: \'(.+)\' \}'
    def _real_extract(self, url):
        news_id = self._match_id(url)
        page = self._download_webpage(url, news_id)
        return self._extract_from_nextmedia_page(news_id, url, page)
    def _extract_from_nextmedia_page(self, news_id, url, page):
        """Build the info dict from an already-downloaded article page."""
        title = self._fetch_title(page)
        video_url = self._search_regex(self._URL_PATTERN, page, 'video url')
        attrs = {
            'id': news_id,
            'title': title,
            'url': video_url, # ext can be inferred from url
            'thumbnail': self._fetch_thumbnail(page),
            'description': self._fetch_description(page),
        }
        # Prefer an exact timestamp; otherwise fall back to the date
        # embedded in the page URL.
        timestamp = self._fetch_timestamp(page)
        if timestamp:
            attrs['timestamp'] = timestamp
        else:
            attrs['upload_date'] = self._fetch_upload_date(url)
        return attrs
    # --- overridable hooks -------------------------------------------
    def _fetch_title(self, page):
        return self._og_search_title(page)
    def _fetch_thumbnail(self, page):
        return self._og_search_thumbnail(page)
    def _fetch_timestamp(self, page):
        dateCreated = self._search_regex('"dateCreated":"([^"]+)"', page, 'created time')
        return parse_iso8601(dateCreated)
    def _fetch_upload_date(self, url):
        return self._search_regex(self._VALID_URL, url, 'upload date', group='date')
    def _fetch_description(self, page):
        return self._og_search_property('description', page)
class NextMediaActionNewsIE(NextMediaIE):
    """Extractor for hk.dv.nextmedia.com "action news" pages, which only
    wrap a link to the underlying article page."""
    IE_DESC = '蘋果日報 - 動新聞'
    _VALID_URL = r'http://hk.dv.nextmedia.com/actionnews/[^/]+/(?P<date>\d+)/(?P<id>\d+)/\d+'
    _TESTS = [{
        'url': 'http://hk.dv.nextmedia.com/actionnews/hit/20150121/19009428/20061460',
        'md5': '05fce8ffeed7a5e00665d4b7cf0f9201',
        'info_dict': {
            'id': '19009428',
            'ext': 'mp4',
            'title': '【壹週刊】細10年男友偷食 50歲邵美琪再失戀',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'md5:cd802fad1f40fd9ea178c1e2af02d659',
            'timestamp': 1421791200,
            'upload_date': '20150120',
        }
    }]
    def _real_extract(self, url):
        news_id = self._match_id(url)
        actionnews_page = self._download_webpage(url, news_id)
        # Follow og:url to the real article page and extract from there.
        article_url = self._og_search_url(actionnews_page)
        article_page = self._download_webpage(article_url, news_id)
        return self._extract_from_nextmedia_page(news_id, url, article_page)
class AppleDailyIE(NextMediaIE):
    """Extractor for the Taiwanese Apple Daily sites; reuses the
    NextMediaIE extraction flow with site-specific hooks."""
    IE_DESC = '臺灣蘋果日報'
    _VALID_URL = r'http://(www|ent).appledaily.com.tw/(?:animation|appledaily|enews|realtimenews)/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)(/.*)?'
    _TESTS = [{
        'url': 'http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694',
        'md5': 'a843ab23d150977cc55ef94f1e2c1e4d',
        'info_dict': {
            'id': '36354694',
            'ext': 'mp4',
            'title': '周亭羽走過摩鐵陰霾2男陪吃 九把刀孤寒看醫生',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'md5:2acd430e59956dc47cd7f67cb3c003f4',
            'upload_date': '20150128',
        }
    }, {
        'url': 'http://www.appledaily.com.tw/realtimenews/article/strange/20150128/550549/%E4%B8%8D%E6%BB%BF%E8%A2%AB%E8%B8%A9%E8%85%B3%E3%80%80%E5%B1%B1%E6%9D%B1%E5%85%A9%E5%A4%A7%E5%AA%BD%E4%B8%80%E8%B7%AF%E6%89%93%E4%B8%8B%E8%BB%8A',
        'md5': '86b4e9132d158279c7883822d94ccc49',
        'info_dict': {
            'id': '550549',
            'ext': 'mp4',
            'title': '不滿被踩腳 山東兩大媽一路打下車',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'md5:175b4260c1d7c085993474217e4ab1b4',
            'upload_date': '20150128',
        }
    }, {
        'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003671',
        'md5': '03df296d95dedc2d5886debbb80cb43f',
        'info_dict': {
            'id': '5003671',
            'ext': 'mp4',
            'title': '20正妹熱舞 《刀龍傳說Online》火辣上市',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd',
            'upload_date': '20150128',
        }
    }, {
        # No thumbnail
        'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/',
        'md5': 'b06182cd386ea7bc6115ec7ff0f72aeb',
        'info_dict': {
            'id': '5003673',
            'ext': 'mp4',
            'title': '半夜尿尿 好像會看到___',
            'description': 'md5:61d2da7fe117fede148706cdb85ac066',
            'upload_date': '20150128',
        },
        'expected_warnings': [
            'video thumbnail',
        ]
    }, {
        'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/',
        'only_matching': True,
    }]
    # Player embed on this site uses {url: '...'} without inner spaces.
    _URL_PATTERN = r'\{url: \'(.+)\'\}'
    def _fetch_title(self, page):
        # Prefer the visible <h1>; fall back to the description meta tag.
        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or
                self._html_search_meta('description', page, 'news title'))
    def _fetch_thumbnail(self, page):
        return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False)
    def _fetch_timestamp(self, page):
        # No reliable timestamp on the page; the upload date is taken
        # from the URL by the base class instead.
        return None
    def _fetch_description(self, page):
        return self._html_search_meta('description', page, 'news description')
| unlicense |
tkajtoch/python-social-auth | social/backends/qq.py | 9 | 2198 | """
Created on May 13, 2014
@author: Yong Zhang (zyfyfe@gmail.com)
"""
import json
from social.utils import parse_qs
from social.backends.oauth import BaseOAuth2
class QQOAuth2(BaseOAuth2):
    """Tencent QQ OAuth2 authentication backend."""
    name = 'qq'
    ID_KEY = 'openid'
    AUTHORIZE_URL = 'https://graph.qq.com/oauth2.0/authorize'
    ACCESS_TOKEN_URL = 'https://graph.qq.com/oauth2.0/token'
    AUTHORIZATION_URL = 'https://graph.qq.com/oauth2.0/authorize'
    OPENID_URL = 'https://graph.qq.com/oauth2.0/me'
    REDIRECT_STATE = False
    EXTRA_DATA = [
        ('nickname', 'username'),
        ('figureurl_qq_1', 'profile_image_url'),
        ('gender', 'gender'),
    ]

    def get_user_details(self, response):
        """
        Return user detail from QQ account sometimes nickname will duplicate
        with another qq account, to avoid this issue it's possible to use
        openid as username.
        """
        source_key = ('openid'
                      if self.setting('USE_OPENID_AS_USERNAME', False)
                      else 'nickname')
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('nickname', '')
        )
        return {
            'username': response.get(source_key, ''),
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name
        }

    def get_openid(self, access_token):
        """Resolve the opaque openid for this access token.

        The endpoint answers with a JSONP wrapper (``callback( {...} );``);
        the ``[10:-3]`` slice strips that wrapper before JSON decoding.
        """
        raw = self.request(self.OPENID_URL, params={
            'access_token': access_token
        })
        payload = json.loads(raw.content[10:-3])
        return payload['openid']

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the user's profile and tag it with the resolved openid."""
        openid = self.get_openid(access_token)
        params = {
            'access_token': access_token,
            'oauth_consumer_key': self.setting('SOCIAL_AUTH_QQ_KEY'),
            'openid': openid,
        }
        profile = self.get_json(
            'https://graph.qq.com/user/get_user_info', params=params
        )
        profile['openid'] = openid
        return profile

    def request_access_token(self, url, data, *args, **kwargs):
        # QQ returns the token as a querystring, not JSON.
        reply = self.request(url, params=data, *args, **kwargs)
        return parse_qs(reply.content)
| bsd-3-clause |
JohnDenker/brython | www/src/Lib/test/test_trace.py | 109 | 14823 | import os
import io
import sys
from test.support import (run_unittest, TESTFN, rmtree, unlink,
captured_stdout)
import unittest
import trace
from trace import CoverageResults, Trace
from test.tracedmodules import testmod
#------------------------------- Utilities -----------------------------------#
def fix_ext_py(filename):
    """Given a .pyc/.pyo filename converts it to the appropriate .py"""
    # Compiled-module names are their source name plus one trailing letter,
    # so dropping the last character recovers the ".py" path.
    return filename[:-1] if filename.endswith(('.pyc', '.pyo')) else filename
def my_file_and_modname():
    """The .py file and module name of this file (__file__)"""
    basename = os.path.basename(__file__)
    modname, _ext = os.path.splitext(basename)
    return fix_ext_py(__file__), modname
def get_firstlineno(func):
    """Return the source line number on which *func*'s definition starts."""
    code_obj = func.__code__
    return code_obj.co_firstlineno
#-------------------- Target functions for tracing ---------------------------#
#
# The relative line numbers of lines in these functions matter for verifying
# tracing. Please modify the appropriate tests if you change one of the
# functions. Absolute line numbers don't matter.
#
# NOTE: these bodies are trace targets -- the tests assert counts keyed on
# each statement's offset from the "def" line, so never insert or remove a
# line INSIDE any of these functions (comments between functions are fine).
def traced_func_linear(x, y):
    a = x          # offset +1
    b = y          # offset +2
    c = a + b      # offset +3
    return c       # offset +4

# Loop fixture: the body line runs 5 times, the "for" line 6 times.
def traced_func_loop(x, y):
    c = x
    for i in range(5):
        c += y
    return c

# Crosses a module boundary into test.tracedmodules.testmod.
def traced_func_importing(x, y):
    return x + y + testmod.func(1)

def traced_func_simple_caller(x):
    c = traced_func_linear(x, x)
    return c + x

def traced_func_importing_caller(x):
    k = traced_func_simple_caller(x)
    k += traced_func_importing(k, x)
    return k

def traced_func_generator(num):
    c = 5  # executed once
    for i in range(num):
        yield i + c

def traced_func_calling_generator():
    k = 0
    for i in traced_func_generator(10):
        k += i

def traced_doubler(num):
    return num * 2

def traced_caller_list_comprehension():
    k = 10
    mylist = [traced_doubler(i) for i in range(k)]
    return mylist
class TracedClass(object):
    # Method-tracing fixture.  As with the module-level fixtures, the tests
    # assert line counts at fixed offsets from each "def" line -- do not add
    # or remove lines inside the method bodies.
    def __init__(self, x):
        self.a = x

    def inst_method_linear(self, y):
        return self.a + y  # offset +1 is asserted by test_linear_methods

    def inst_method_calling(self, x):
        c = self.inst_method_linear(x)
        return c + traced_func_linear(x, c)

    @classmethod
    def class_method_linear(cls, y):
        return y * 2

    @staticmethod
    def static_method_linear(y):
        return y * 2
#------------------------------ Test cases -----------------------------------#
class TestLineCounts(unittest.TestCase):
    """White-box testing of line-counting, via runfunc"""

    def setUp(self):
        # Restore whatever trace function was installed before this test.
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        self.my_py_filename = fix_ext_py(__file__)

    def test_traced_func_linear(self):
        result = self.tracer.runfunc(traced_func_linear, 2, 5)
        self.assertEqual(result, 7)

        # all lines are executed once
        expected = {}
        firstlineno = get_firstlineno(traced_func_linear)
        for i in range(1, 5):
            expected[(self.my_py_filename, firstlineno + i)] = 1

        self.assertEqual(self.tracer.results().counts, expected)

    def test_traced_func_loop(self):
        self.tracer.runfunc(traced_func_loop, 2, 3)

        firstlineno = get_firstlineno(traced_func_loop)
        # "for" line fires 6 times (5 iterations + exhaustion check).
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }
        self.assertEqual(self.tracer.results().counts, expected)

    def test_traced_func_importing(self):
        self.tracer.runfunc(traced_func_importing, 2, 5)

        firstlineno = get_firstlineno(traced_func_importing)
        # Lines of the imported helper module must be counted too.
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (fix_ext_py(testmod.__file__), 2): 1,
            (fix_ext_py(testmod.__file__), 3): 1,
        }

        self.assertEqual(self.tracer.results().counts, expected)

    def test_trace_func_generator(self):
        self.tracer.runfunc(traced_func_calling_generator)

        firstlineno_calling = get_firstlineno(traced_func_calling_generator)
        firstlineno_gen = get_firstlineno(traced_func_generator)
        expected = {
            (self.my_py_filename, firstlineno_calling + 1): 1,
            (self.my_py_filename, firstlineno_calling + 2): 11,
            (self.my_py_filename, firstlineno_calling + 3): 10,
            (self.my_py_filename, firstlineno_gen + 1): 1,
            (self.my_py_filename, firstlineno_gen + 2): 11,
            (self.my_py_filename, firstlineno_gen + 3): 10,
        }
        self.assertEqual(self.tracer.results().counts, expected)

    def test_trace_list_comprehension(self):
        self.tracer.runfunc(traced_caller_list_comprehension)

        firstlineno_calling = get_firstlineno(traced_caller_list_comprehension)
        firstlineno_called = get_firstlineno(traced_doubler)
        expected = {
            (self.my_py_filename, firstlineno_calling + 1): 1,
            # List comprehensions work differently in 3.x, so the count
            # below changed compared to 2.x.
            (self.my_py_filename, firstlineno_calling + 2): 12,
            (self.my_py_filename, firstlineno_calling + 3): 1,
            (self.my_py_filename, firstlineno_called + 1): 10,
        }
        self.assertEqual(self.tracer.results().counts, expected)

    def test_linear_methods(self):
        # XXX todo: later add 'static_method_linear' and 'class_method_linear'
        # here, once issue1764286 is resolved
        #
        for methname in ['inst_method_linear',]:
            tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
            traced_obj = TracedClass(25)
            method = getattr(traced_obj, methname)
            tracer.runfunc(method, 20)

            firstlineno = get_firstlineno(method)
            expected = {
                (self.my_py_filename, firstlineno + 1): 1,
            }
            self.assertEqual(tracer.results().counts, expected)
class TestRunExecCounts(unittest.TestCase):
    """A simple sanity test of line-counting, via runctx (exec)"""

    def setUp(self):
        self.my_py_filename = fix_ext_py(__file__)
        # Restore any pre-existing trace function afterwards.
        self.addCleanup(sys.settrace, sys.gettrace())

    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        code = r'''traced_func_loop(2, 5)'''
        code = compile(code, __file__, 'exec')
        self.tracer.runctx(code, globals(), vars())

        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }

        # When used through 'run', some other spurious counts are produced, like
        # the settrace of threading, which we ignore, just making sure that the
        # counts for traced_func_loop were right.
        #
        for k in expected.keys():
            self.assertEqual(self.tracer.results().counts[k], expected[k])
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        # countfuncs=1: record which functions ran, not per-line counts.
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
            self.filemod + ('traced_func_importing_caller',): 1,
            self.filemod + ('traced_func_importing',): 1,
            (fix_ext_py(testmod.__file__), 'testmod', 'func'): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)

        expected = {
            self.filemod + ('TracedClass.inst_method_calling',): 1,
            self.filemod + ('TracedClass.inst_method_linear',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
class TestCallers(unittest.TestCase):
    """White-box testing of callers tracing"""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        # countcallers=1: record (caller, callee) pairs.
        self.tracer = Trace(count=0, trace=0, countcallers=1)
        self.filemod = my_file_and_modname()

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        # Keys are ((file, module, function) caller, (file, module, function)
        # callee) pairs; the outermost caller is trace.Trace.runfunc itself.
        expected = {
            ((os.path.splitext(trace.__file__)[0] + '.py', 'trace', 'Trace.runfunc'),
                (self.filemod + ('traced_func_importing_caller',))): 1,
            ((self.filemod + ('traced_func_simple_caller',)),
                (self.filemod + ('traced_func_linear',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_simple_caller',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_importing',))): 1,
            ((self.filemod + ('traced_func_importing',)),
                (fix_ext_py(testmod.__file__), 'testmod', 'func')): 1,
        }
        self.assertEqual(self.tracer.results().callers, expected)
# Created separately for issue #3821
class TestCoverage(unittest.TestCase):
    """Coverage-report (.cover file) generation tests (issue #3821)."""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())

    def tearDown(self):
        # TESTFN is used both as the coverdir and as a scratch file.
        rmtree(TESTFN)
        unlink(TESTFN)

    def _coverage(self, tracer,
                  cmd='from test import test_pprint; test_pprint.test_main()'):
        # Run *cmd* under the tracer and dump .cover files into TESTFN.
        tracer.run(cmd)
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=TESTFN)

    def test_coverage(self):
        tracer = trace.Trace(trace=0, count=1)
        with captured_stdout() as stdout:
            self._coverage(tracer)
        stdout = stdout.getvalue()
        self.assertTrue("pprint.py" in stdout)
        self.assertTrue("case.py" in stdout)   # from unittest
        files = os.listdir(TESTFN)
        self.assertTrue("pprint.cover" in files)
        self.assertTrue("unittest.case.cover" in files)

    def test_coverage_ignore(self):
        # Ignore all files, nothing should be traced nor printed
        libpath = os.path.normpath(os.path.dirname(os.__file__))
        # sys.prefix does not work when running from a checkout
        tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
                             libpath], trace=0, count=1)
        with captured_stdout() as stdout:
            self._coverage(tracer)
        if os.path.exists(TESTFN):
            files = os.listdir(TESTFN)
            self.assertEqual(files, ['_importlib.cover'])  # Ignore __import__

    def test_issue9936(self):
        tracer = trace.Trace(trace=0, count=1)
        modname = 'test.tracedmodules.testmod'
        # Ensure that the module is executed in import
        if modname in sys.modules:
            del sys.modules[modname]
        cmd = ("import test.tracedmodules.testmod as t;"
               "t.func(0); t.func2();")
        with captured_stdout() as stdout:
            self._coverage(tracer, cmd)
        stdout.seek(0)
        stdout.readline()  # skip the summary header line
        coverage = {}
        for line in stdout:
            lines, cov, module = line.split()[:3]
            coverage[module] = (int(lines), int(cov[:-1]))
        # XXX This is needed to run regrtest.py as a script
        modname = trace._fullmodname(sys.modules[modname].__file__)
        self.assertIn(modname, coverage)
        self.assertEqual(coverage[modname], (5, 100))
### Tests that don't mess with sys.settrace and can be traced
### themselves TODO: Skip tests that do mess with sys.settrace when
### regrtest is invoked with -T option.
class Test_Ignore(unittest.TestCase):
    """White-box checks for the private trace._Ignore path filter."""

    def test_ignored(self):
        join = os.path.join
        checker = trace._Ignore(['x', 'y.z'], [join('foo', 'bar')])
        # NOTE: order matters -- the final case relies on _Ignore having
        # cached the earlier positive answer for module 'baz'.
        cases = [
            ('x.py', 'x', True),
            ('xy.py', 'xy', False),
            ('y.py', 'y', False),
            (join('foo', 'bar', 'baz.py'), 'baz', True),
            (join('bar', 'z.py'), 'z', False),
            # Matched before.
            (join('bar', 'baz.py'), 'baz', True),
        ]
        for filename, modname, expected in cases:
            self.assertEqual(bool(checker.names(filename, modname)), expected)
class TestDeprecatedMethods(unittest.TestCase):
    """Each legacy trace-module API must still work but warn."""

    def test_deprecated_usage(self):
        sio = io.StringIO()
        with self.assertWarns(DeprecationWarning):
            trace.usage(sio)
        self.assertIn('Usage:', sio.getvalue())

    def test_deprecated_Ignore(self):
        with self.assertWarns(DeprecationWarning):
            trace.Ignore()

    def test_deprecated_modname(self):
        with self.assertWarns(DeprecationWarning):
            self.assertEqual("spam", trace.modname("spam"))

    def test_deprecated_fullmodname(self):
        with self.assertWarns(DeprecationWarning):
            self.assertEqual("spam", trace.fullmodname("spam"))

    def test_deprecated_find_lines_from_code(self):
        with self.assertWarns(DeprecationWarning):
            def foo():
                pass
            trace.find_lines_from_code(foo.__code__, ["eggs"])

    def test_deprecated_find_lines(self):
        with self.assertWarns(DeprecationWarning):
            def foo():
                pass
            trace.find_lines(foo.__code__, ["eggs"])

    def test_deprecated_find_strings(self):
        # The file only needs to exist; it is removed again via addCleanup.
        with open(TESTFN, 'w') as fd:
            self.addCleanup(unlink, TESTFN)
        with self.assertWarns(DeprecationWarning):
            trace.find_strings(fd.name)

    def test_deprecated_find_executable_linenos(self):
        with open(TESTFN, 'w') as fd:
            self.addCleanup(unlink, TESTFN)
        with self.assertWarns(DeprecationWarning):
            trace.find_executable_linenos(fd.name)
def test_main():
    # Run every TestCase in this module via test.support's classic runner.
    run_unittest(__name__)


if __name__ == '__main__':
    test_main()
| bsd-3-clause |
srpepperoni/libGDX | extensions/gdx-freetype/jni/freetype-2.5.5/src/tools/docmaker/docbeauty.py | 877 | 2642 | #!/usr/bin/env python
#
# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
#
# This program is used to beautify the documentation comments used
# in the FreeType 2 public headers.
#
from sources import *
from content import *
from utils import *
import utils
import sys, os, time, string, getopt
# Shared processor instance reused by beautify_block() for every block.
content_processor = ContentProcessor()


def beautify_block( block ):
    # Re-run the content processor over the block's markup and rebuild the
    # C-style comment "borders" around the beautified text in-place.
    if block.content:
        content_processor.reset()

        markups = content_processor.process_content( block.content )
        text    = []
        first   = 1  # only the first markup is beautified as the summary

        for markup in markups:
            text.extend( markup.beautify( first ) )
            first = 0

        # now beautify the documentation "borders" themselves
        lines = [" /*************************************************************************"]
        for l in text:
            lines.append( " *" + l )
        lines.append( " */" )

        block.lines = lines
def usage():
    # Print command-line help to stdout (Python 2 print statements).
    print "\nDocBeauty 0.1 Usage information\n"
    print "  docbeauty [options] file1 [file2 ...]\n"
    print "using the following options:\n"
    print "  -h : print this page"
    print "  -b : backup original files with the 'orig' extension"
    print ""
    print "  --backup : same as -b"
def main( argv ):
    """main program loop"""

    global output_dir

    try:
        opts, args = getopt.getopt( sys.argv[1:], \
                                    "hb", \
                                    ["help", "backup"] )
    except getopt.GetoptError:
        usage()
        sys.exit( 2 )

    if args == []:
        usage()
        sys.exit( 1 )

    # process options
    #
    output_dir = None
    do_backup  = None

    for opt in opts:
        if opt[0] in ( "-h", "--help" ):
            usage()
            sys.exit( 0 )

        if opt[0] in ( "-b", "--backup" ):
            do_backup = 1
            # NOTE(review): do_backup is set here but never consulted below --
            # the advertised -b/--backup behaviour appears unimplemented.

    # create context and processor
    source_processor = SourceProcessor()

    # retrieve the list of files to process
    file_list = make_file_list( args )
    for filename in file_list:
        source_processor.parse_file( filename )

        for block in source_processor.blocks:
            beautify_block( block )

        # write the beautified blocks to "<filename>.new" alongside the input
        new_name = filename + ".new"
        ok       = None

        try:
            file = open( new_name, "wt" )
            for block in source_processor.blocks:
                for line in block.lines:
                    file.write( line )
                    file.write( "\n" )
            file.close()
        except:
            # NOTE(review): write failures are swallowed; ok is set but never
            # read, so errors are silently ignored.
            ok = 0
# if called from the command line
#
if __name__ == '__main__':
    # Passes the full argv list, although main() re-reads sys.argv itself.
    main( sys.argv )
# eof
| apache-2.0 |
tomer8007/kik-bot-api-unofficial | kik_unofficial/protobuf/entity/v1/entity_service_pb2.py | 1 | 65624 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: entity/v1/entity_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
from kik_unofficial.protobuf.entity.v1 import entity_common_pb2 as entity_dot_v1_dot_entity__common__pb2
from kik_unofficial.protobuf.common.v1 import model_pb2 as common_dot_v1_dot_model__pb2
from kik_unofficial.protobuf.entity.v1 import element_common_pb2 as entity_dot_v1_dot_element__common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='entity/v1/entity_service.proto',
package='mobile.entity.v1',
syntax='proto3',
serialized_pb=_b('\n\x1e\x65ntity/v1/entity_service.proto\x12\x10mobile.entity.v1\x1a\x12\x63ommon_model.proto\x1a\x19protobuf_validation.proto\x1a\x1d\x65ntity/v1/entity_common.proto\x1a\x15\x63ommon/v1/model.proto\x1a\x1e\x65ntity/v1/element_common.proto\"B\n\x0fGetUsersRequest\x12/\n\x03ids\x18\x01 \x03(\x0b\x32\x15.common.XiBareUserJidB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x14\"\xa0\x02\n\x10GetUsersResponse\x12\x39\n\x06result\x18\x01 \x01(\x0e\x32).mobile.entity.v1.GetUsersResponse.Result\x12+\n\x05users\x18\n \x03(\x0b\x32\x1c.common.entity.v1.EntityUser\x12,\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x15.common.XiBareUserJid\x12)\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x15.common.XiBareUserJid\x12,\n\rnot_found_ids\x18\r \x03(\x0b\x32\x15.common.XiBareUserJid\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\"A\n\x0eGetBotsRequest\x12/\n\x03ids\x18\x01 \x03(\x0b\x32\x15.common.XiBareUserJidB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x14\"\x9c\x02\n\x0fGetBotsResponse\x12\x38\n\x06result\x18\x01 \x01(\x0e\x32(.mobile.entity.v1.GetBotsResponse.Result\x12)\n\x04\x62ots\x18\n \x03(\x0b\x32\x1b.common.entity.v1.EntityBot\x12,\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x15.common.XiBareUserJid\x12)\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x15.common.XiBareUserJid\x12,\n\rnot_found_ids\x18\r \x03(\x0b\x32\x15.common.XiBareUserJid\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\"N\n\x1bGetUserRosterEntriesRequest\x12/\n\x03ids\x18\x01 \x03(\x0b\x32\x15.common.XiBareUserJidB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x14\"\xd1\x02\n\x1cGetUserRosterEntriesResponse\x12\x45\n\x06result\x18\x01 \x01(\x0e\x32\x35.mobile.entity.v1.GetUserRosterEntriesResponse.Result\x12\x44\n\x13user_roster_entries\x18\n \x03(\x0b\x32\'.common.entity.v1.EntityUserRosterEntry\x12,\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x15.common.XiBareUserJid\x12)\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x15.common.XiBareUserJid\x12,\n\rnot_found_ids\x18\r 
\x03(\x0b\x32\x15.common.XiBareUserJid\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\"@\n\x10GetGroupsRequest\x12,\n\x03ids\x18\x01 \x03(\x0b\x32\x12.common.XiGroupJidB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x14\"\x9b\x02\n\x11GetGroupsResponse\x12:\n\x06result\x18\x01 \x01(\x0e\x32*.mobile.entity.v1.GetGroupsResponse.Result\x12-\n\x06groups\x18\n \x03(\x0b\x32\x1d.common.entity.v1.EntityGroup\x12)\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x12.common.XiGroupJid\x12&\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x12.common.XiGroupJid\x12)\n\rnot_found_ids\x18\r \x03(\x0b\x32\x12.common.XiGroupJid\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\"B\n\x10GetConvosRequest\x12.\n\x03ids\x18\x01 \x03(\x0b\x32\x14.common.v1.XiConvoIdB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x14\"\xa1\x02\n\x11GetConvosResponse\x12:\n\x06result\x18\x01 \x01(\x0e\x32*.mobile.entity.v1.GetConvosResponse.Result\x12-\n\x06\x63onvos\x18\n \x03(\x0b\x32\x1d.common.entity.v1.EntityConvo\x12+\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x14.common.v1.XiConvoId\x12(\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x14.common.v1.XiConvoId\x12+\n\rnot_found_ids\x18\r \x03(\x0b\x32\x14.common.v1.XiConvoId\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\"H\n\x15GetTrustedBotsRequest\x12/\n\npage_token\x18\x01 \x01(\x0b\x32\x1b.mobile.entity.v1.PageToken\"\xd5\x01\n\x16GetTrustedBotsResponse\x12?\n\x06result\x18\x01 \x01(\x0e\x32/.mobile.entity.v1.GetTrustedBotsResponse.Result\x12\x37\n\x0ctrusted_bots\x18\x02 \x03(\x0b\x32\x15.common.XiBareUserJidB\n\xca\x9d%\x06\x08\x00\x80\x01\x88\'\x12/\n\npage_token\x18\x03 \x01(\x0b\x32\x1b.mobile.entity.v1.PageToken\"\x10\n\x06Result\x12\x06\n\x02OK\x10\x00\"\x1a\n\tPageToken\x12\r\n\x05token\x18\x01 \x01(\x0c\"L\n\x1cGetGroupRosterEntriesRequest\x12,\n\x03ids\x18\x01 \x03(\x0b\x32\x12.common.XiGroupJidB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x14\"\xcc\x02\n\x1dGetGroupRosterEntriesResponse\x12\x46\n\x06result\x18\x01 
\x01(\x0e\x32\x36.mobile.entity.v1.GetGroupRosterEntriesResponse.Result\x12\x46\n\x14group_roster_entries\x18\n \x03(\x0b\x32(.common.entity.v1.EntityGroupRosterEntry\x12)\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x12.common.XiGroupJid\x12&\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x12.common.XiGroupJid\x12)\n\rnot_found_ids\x18\r \x03(\x0b\x32\x12.common.XiGroupJid\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\"F\n\x0cRequestedJid\x12*\n\talias_jid\x18\x01 \x01(\x0b\x32\x15.common.v1.XiAliasJidH\x00\x42\n\n\x08jid_type\"P\n\x16GetUsersByAliasRequest\x12\x36\n\x03ids\x18\x01 \x03(\x0b\x32\x1e.mobile.entity.v1.RequestedJidB\t\xca\x9d%\x05\x08\x00\x80\x01\x14\"\xae\x04\n\x18PublicGroupMemberProfile\x12\x31\n\x0b\x62io_element\x18\x01 \x01(\x0b\x32\x1c.common.entity.v1.BioElement\x12\x38\n\x0bprofile_pic\x18\x02 \x01(\x0b\x32#.common.entity.v1.ProfilePicElement\x12`\n background_profile_pic_extension\x18\x03 \x01(\x0b\x32\x36.common.entity.v1.BackgroundProfilePicExtensionElement\x12\x43\n\x14registration_element\x18\x04 \x01(\x0b\x32%.common.entity.v1.RegistrationElement\x12=\n\x11interests_element\x18\x05 \x01(\x0b\x32\".common.entity.v1.InterestsElement\x12\x42\n\x14\x65moji_status_element\x18\x06 \x01(\x0b\x32$.common.entity.v1.EmojiStatusElement\x12:\n\x0c\x64isplay_name\x18\x07 \x01(\x0b\x32$.common.entity.v1.DisplayNameElement\x12?\n\x13kin_user_id_element\x18\x08 \x01(\x0b\x32\".common.entity.v1.KinUserIdElement\"\x8d\x01\n\x15\x41nonChatMemberProfile\x12\x38\n\x0bprofile_pic\x18\x02 \x01(\x0b\x32#.common.entity.v1.ProfilePicElement\x12:\n\x0c\x64isplay_name\x18\x07 \x01(\x0b\x32$.common.entity.v1.DisplayNameElement\"h\n\x0ePrivateProfile\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.common.XiBareUserJid\x12\x33\n\x08username\x18\x02 \x01(\x0b\x32!.common.entity.v1.UsernameElement\"\xb2\x02\n\x16GetUsersByAliasPayload\x12O\n\x1bpublic_group_member_profile\x18\x01 
\x01(\x0b\x32*.mobile.entity.v1.PublicGroupMemberProfile\x12I\n\x18\x61non_chat_member_profile\x18\x05 \x01(\x0b\x32\'.mobile.entity.v1.AnonChatMemberProfile\x12\x39\n\x0fprivate_profile\x18\x02 \x01(\x0b\x32 .mobile.entity.v1.PrivateProfile\x12*\n\x02id\x18\x03 \x01(\x0b\x32\x1e.mobile.entity.v1.RequestedJid\x12\x15\n\rrequest_index\x18\x04 \x01(\x05\"\xd8\x02\n\x17GetUsersByAliasResponse\x12@\n\x06result\x18\x01 \x01(\x0e\x32\x30.mobile.entity.v1.GetUsersByAliasResponse.Result\x12:\n\x08payloads\x18\n \x03(\x0b\x32(.mobile.entity.v1.GetUsersByAliasPayload\x12\x35\n\rretriable_ids\x18\x0b \x03(\x0b\x32\x1e.mobile.entity.v1.RequestedJid\x12\x32\n\nfailed_ids\x18\x0c \x03(\x0b\x32\x1e.mobile.entity.v1.RequestedJid\x12\x35\n\rnot_found_ids\x18\r \x03(\x0b\x32\x1e.mobile.entity.v1.RequestedJid\"\x1d\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PARTIAL\x10\x01\x32\x95\x06\n\x06\x45ntity\x12Q\n\x08GetUsers\x12!.mobile.entity.v1.GetUsersRequest\x1a\".mobile.entity.v1.GetUsersResponse\x12N\n\x07GetBots\x12 .mobile.entity.v1.GetBotsRequest\x1a!.mobile.entity.v1.GetBotsResponse\x12T\n\tGetGroups\x12\".mobile.entity.v1.GetGroupsRequest\x1a#.mobile.entity.v1.GetGroupsResponse\x12T\n\tGetConvos\x12\".mobile.entity.v1.GetConvosRequest\x1a#.mobile.entity.v1.GetConvosResponse\x12u\n\x14GetUserRosterEntries\x12-.mobile.entity.v1.GetUserRosterEntriesRequest\x1a..mobile.entity.v1.GetUserRosterEntriesResponse\x12x\n\x15GetGroupRosterEntries\x12..mobile.entity.v1.GetGroupRosterEntriesRequest\x1a/.mobile.entity.v1.GetGroupRosterEntriesResponse\x12\x66\n\x0fGetUsersByAlias\x12(.mobile.entity.v1.GetUsersByAliasRequest\x1a).mobile.entity.v1.GetUsersByAliasResponse\x12\x63\n\x0eGetTrustedBots\x12\'.mobile.entity.v1.GetTrustedBotsRequest\x1a(.mobile.entity.v1.GetTrustedBotsResponseBc\n\x15\x63om.kik.entity.mobileZJgithub.com/kikinteractive/xiphias-api-mobile/generated/go/entity/v1;entityb\x06proto3')
,
dependencies=[common__model__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,entity_dot_v1_dot_entity__common__pb2.DESCRIPTOR,common_dot_v1_dot_model__pb2.DESCRIPTOR,entity_dot_v1_dot_element__common__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Machine-generated EnumDescriptors (protoc output for the Result enums of
# each response message).  Do not edit by hand -- regenerate from
# entity/v1/entity_service.proto instead.  serialized_start/end are byte
# offsets into DESCRIPTOR.serialized_pb, not line numbers.
# ---------------------------------------------------------------------------
_GETUSERSRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetUsersResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETUSERSRESPONSE_RESULT)

_GETBOTSRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetBotsResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETBOTSRESPONSE_RESULT)

_GETUSERROSTERENTRIESRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetUserRosterEntriesResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETUSERROSTERENTRIESRESPONSE_RESULT)

_GETGROUPSRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetGroupsResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETGROUPSRESPONSE_RESULT)

_GETCONVOSRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetConvosResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETCONVOSRESPONSE_RESULT)

# GetTrustedBotsResponse.Result has no PARTIAL value, hence the shorter
# serialized span.
_GETTRUSTEDBOTSRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetTrustedBotsResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=529,
)
_sym_db.RegisterEnumDescriptor(_GETTRUSTEDBOTSRESPONSE_RESULT)

_GETGROUPROSTERENTRIESRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetGroupRosterEntriesResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETGROUPROSTERENTRIESRESPONSE_RESULT)

_GETUSERSBYALIASRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='mobile.entity.v1.GetUsersByAliasResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PARTIAL', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=513,
  serialized_end=542,
)
_sym_db.RegisterEnumDescriptor(_GETUSERSBYALIASRESPONSE_RESULT)
# Machine-generated message descriptor for GetUsersRequest (protoc output);
# do not edit by hand.  The packed FieldOptions bytes carry the
# protobuf_validation constraints declared in the .proto (required,
# min/max repetitions).
_GETUSERSREQUEST = _descriptor.Descriptor(
  name='GetUsersRequest',
  full_name='mobile.entity.v1.GetUsersRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetUsersRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=185,
  serialized_end=251,
)
# Machine-generated message descriptor for GetUsersResponse (protoc output);
# do not edit by hand.  Field numbers 10-13 partition the requested ids into
# resolved users, retriable failures, permanent failures, and not-found.
_GETUSERSRESPONSE = _descriptor.Descriptor(
  name='GetUsersResponse',
  full_name='mobile.entity.v1.GetUsersResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetUsersResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='users', full_name='mobile.entity.v1.GetUsersResponse.users', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetUsersResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetUsersResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetUsersResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETUSERSRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=254,
  serialized_end=542,
)
# Auto-generated descriptor for mobile.entity.v1.GetBotsRequest.
# Mirrors GetUsersRequest: one repeated message field 'ids' (field number 1).
_GETBOTSREQUEST = _descriptor.Descriptor(
  name='GetBotsRequest',
  full_name='mobile.entity.v1.GetBotsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetBotsRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # Serialized custom FieldOptions bytes emitted by protoc; opaque here.
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=544,
  serialized_end=609,
)
# Auto-generated descriptor for mobile.entity.v1.GetBotsResponse:
# enum 'result' (field 1) plus repeated 'bots' (10), 'retriable_ids' (11),
# 'failed_ids' (12), 'not_found_ids' (13).
_GETBOTSRESPONSE = _descriptor.Descriptor(
  name='GetBotsResponse',
  full_name='mobile.entity.v1.GetBotsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetBotsResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bots', full_name='mobile.entity.v1.GetBotsResponse.bots', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetBotsResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetBotsResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetBotsResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETBOTSRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=612,
  serialized_end=896,
)
# Auto-generated descriptor for mobile.entity.v1.GetUserRosterEntriesRequest.
# One repeated message field 'ids' (field number 1) with custom field options.
_GETUSERROSTERENTRIESREQUEST = _descriptor.Descriptor(
  name='GetUserRosterEntriesRequest',
  full_name='mobile.entity.v1.GetUserRosterEntriesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetUserRosterEntriesRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=898,
  serialized_end=976,
)
# Auto-generated descriptor for mobile.entity.v1.GetUserRosterEntriesResponse:
# enum 'result' (field 1) plus repeated 'user_roster_entries' (10),
# 'retriable_ids' (11), 'failed_ids' (12), 'not_found_ids' (13).
_GETUSERROSTERENTRIESRESPONSE = _descriptor.Descriptor(
  name='GetUserRosterEntriesResponse',
  full_name='mobile.entity.v1.GetUserRosterEntriesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetUserRosterEntriesResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='user_roster_entries', full_name='mobile.entity.v1.GetUserRosterEntriesResponse.user_roster_entries', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetUserRosterEntriesResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetUserRosterEntriesResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetUserRosterEntriesResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETUSERROSTERENTRIESRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=979,
  serialized_end=1316,
)
# Auto-generated descriptor for mobile.entity.v1.GetGroupsRequest.
# One repeated message field 'ids' (field number 1); the linkage section below
# resolves it to a group-JID message type.
_GETGROUPSREQUEST = _descriptor.Descriptor(
  name='GetGroupsRequest',
  full_name='mobile.entity.v1.GetGroupsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetGroupsRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1318,
  serialized_end=1382,
)
# Auto-generated descriptor for mobile.entity.v1.GetGroupsResponse:
# enum 'result' (field 1) plus repeated 'groups' (10), 'retriable_ids' (11),
# 'failed_ids' (12), 'not_found_ids' (13).
_GETGROUPSRESPONSE = _descriptor.Descriptor(
  name='GetGroupsResponse',
  full_name='mobile.entity.v1.GetGroupsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetGroupsResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='groups', full_name='mobile.entity.v1.GetGroupsResponse.groups', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetGroupsResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetGroupsResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetGroupsResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETGROUPSRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1385,
  serialized_end=1668,
)
# Auto-generated descriptor for mobile.entity.v1.GetConvosRequest.
# One repeated message field 'ids' (field number 1); resolved below to a
# conversation-ID message type.
_GETCONVOSREQUEST = _descriptor.Descriptor(
  name='GetConvosRequest',
  full_name='mobile.entity.v1.GetConvosRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetConvosRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1670,
  serialized_end=1736,
)
# Auto-generated descriptor for mobile.entity.v1.GetConvosResponse:
# enum 'result' (field 1) plus repeated 'convos' (10), 'retriable_ids' (11),
# 'failed_ids' (12), 'not_found_ids' (13).
_GETCONVOSRESPONSE = _descriptor.Descriptor(
  name='GetConvosResponse',
  full_name='mobile.entity.v1.GetConvosResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetConvosResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='convos', full_name='mobile.entity.v1.GetConvosResponse.convos', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetConvosResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetConvosResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetConvosResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETCONVOSRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1739,
  serialized_end=2028,
)
# Auto-generated descriptor for mobile.entity.v1.GetTrustedBotsRequest.
# Single optional message field 'page_token' (field 1) for pagination;
# resolved to the PageToken message in the linkage section below.
_GETTRUSTEDBOTSREQUEST = _descriptor.Descriptor(
  name='GetTrustedBotsRequest',
  full_name='mobile.entity.v1.GetTrustedBotsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='page_token', full_name='mobile.entity.v1.GetTrustedBotsRequest.page_token', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2030,
  serialized_end=2102,
)
# Auto-generated descriptor for mobile.entity.v1.GetTrustedBotsResponse:
# enum 'result' (field 1), repeated 'trusted_bots' (2, with custom field
# options), and an optional 'page_token' (3) for fetching the next page.
_GETTRUSTEDBOTSRESPONSE = _descriptor.Descriptor(
  name='GetTrustedBotsResponse',
  full_name='mobile.entity.v1.GetTrustedBotsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetTrustedBotsResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='trusted_bots', full_name='mobile.entity.v1.GetTrustedBotsResponse.trusted_bots', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # Serialized custom FieldOptions bytes emitted by protoc; opaque here.
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\006\010\000\200\001\210\''))),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='mobile.entity.v1.GetTrustedBotsResponse.page_token', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETTRUSTEDBOTSRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2105,
  serialized_end=2318,
)
# Auto-generated descriptor for mobile.entity.v1.PageToken: an opaque
# pagination cursor with a single bytes field 'token' (field 1; type=12 is
# the protobuf TYPE_BYTES constant).
_PAGETOKEN = _descriptor.Descriptor(
  name='PageToken',
  full_name='mobile.entity.v1.PageToken',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='mobile.entity.v1.PageToken.token', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2320,
  serialized_end=2346,
)
# Auto-generated descriptor for mobile.entity.v1.GetGroupRosterEntriesRequest.
# One repeated message field 'ids' (field number 1) with custom field options.
_GETGROUPROSTERENTRIESREQUEST = _descriptor.Descriptor(
  name='GetGroupRosterEntriesRequest',
  full_name='mobile.entity.v1.GetGroupRosterEntriesRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetGroupRosterEntriesRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2348,
  serialized_end=2424,
)
# Auto-generated descriptor for mobile.entity.v1.GetGroupRosterEntriesResponse:
# enum 'result' (field 1) plus repeated 'group_roster_entries' (10),
# 'retriable_ids' (11), 'failed_ids' (12), 'not_found_ids' (13).
_GETGROUPROSTERENTRIESRESPONSE = _descriptor.Descriptor(
  name='GetGroupRosterEntriesResponse',
  full_name='mobile.entity.v1.GetGroupRosterEntriesResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetGroupRosterEntriesResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='group_roster_entries', full_name='mobile.entity.v1.GetGroupRosterEntriesResponse.group_roster_entries', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetGroupRosterEntriesResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetGroupRosterEntriesResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetGroupRosterEntriesResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETGROUPROSTERENTRIESRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2427,
  serialized_end=2759,
)
# Auto-generated descriptor for mobile.entity.v1.RequestedJid.
# Declares a 'jid_type' oneof; 'alias_jid' (field 1) is placed into the oneof
# by the linkage section below. Additional oneof variants, if any, would be
# declared elsewhere in the .proto -- only alias_jid is visible here.
_REQUESTEDJID = _descriptor.Descriptor(
  name='RequestedJid',
  full_name='mobile.entity.v1.RequestedJid',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='alias_jid', full_name='mobile.entity.v1.RequestedJid.alias_jid', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='jid_type', full_name='mobile.entity.v1.RequestedJid.jid_type',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=2761,
  serialized_end=2831,
)
# Auto-generated descriptor for mobile.entity.v1.GetUsersByAliasRequest.
# One repeated field 'ids' (field 1) of RequestedJid (wired up below); note its
# custom-options byte string differs from the other request messages'.
_GETUSERSBYALIASREQUEST = _descriptor.Descriptor(
  name='GetUsersByAliasRequest',
  full_name='mobile.entity.v1.GetUsersByAliasRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ids', full_name='mobile.entity.v1.GetUsersByAliasRequest.ids', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005\010\000\200\001\024'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2833,
  serialized_end=2913,
)
# Auto-generated descriptor for mobile.entity.v1.PublicGroupMemberProfile.
# Eight optional message fields (numbers 1-8), each a profile "element"
# message resolved in the linkage section below.
_PUBLICGROUPMEMBERPROFILE = _descriptor.Descriptor(
  name='PublicGroupMemberProfile',
  full_name='mobile.entity.v1.PublicGroupMemberProfile',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='bio_element', full_name='mobile.entity.v1.PublicGroupMemberProfile.bio_element', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='profile_pic', full_name='mobile.entity.v1.PublicGroupMemberProfile.profile_pic', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='background_profile_pic_extension', full_name='mobile.entity.v1.PublicGroupMemberProfile.background_profile_pic_extension', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='registration_element', full_name='mobile.entity.v1.PublicGroupMemberProfile.registration_element', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='interests_element', full_name='mobile.entity.v1.PublicGroupMemberProfile.interests_element', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='emoji_status_element', full_name='mobile.entity.v1.PublicGroupMemberProfile.emoji_status_element', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='display_name', full_name='mobile.entity.v1.PublicGroupMemberProfile.display_name', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='kin_user_id_element', full_name='mobile.entity.v1.PublicGroupMemberProfile.kin_user_id_element', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2916,
  serialized_end=3474,
)
# Auto-generated descriptor for mobile.entity.v1.AnonChatMemberProfile.
# A reduced profile: only 'profile_pic' (field 2) and 'display_name' (field 7);
# field numbers match the corresponding PublicGroupMemberProfile fields.
_ANONCHATMEMBERPROFILE = _descriptor.Descriptor(
  name='AnonChatMemberProfile',
  full_name='mobile.entity.v1.AnonChatMemberProfile',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='profile_pic', full_name='mobile.entity.v1.AnonChatMemberProfile.profile_pic', index=0,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='display_name', full_name='mobile.entity.v1.AnonChatMemberProfile.display_name', index=1,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3477,
  serialized_end=3618,
)
# Auto-generated descriptor for mobile.entity.v1.PrivateProfile:
# optional message fields 'id' (1) and 'username' (2). The message types are
# not resolved within this chunk -- presumably wired up later in the file.
_PRIVATEPROFILE = _descriptor.Descriptor(
  name='PrivateProfile',
  full_name='mobile.entity.v1.PrivateProfile',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='mobile.entity.v1.PrivateProfile.id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='username', full_name='mobile.entity.v1.PrivateProfile.username', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3620,
  serialized_end=3724,
)
# Auto-generated descriptor for mobile.entity.v1.GetUsersByAliasPayload.
# Per-alias result carrying one of the profile variants declared above
# (public_group_member_profile=1, anon_chat_member_profile=5,
# private_profile=2), the resolved 'id' (3), and an int32 'request_index' (4)
# that presumably maps the payload back to its position in the request's
# 'ids' list -- not confirmable from this chunk.
_GETUSERSBYALIASPAYLOAD = _descriptor.Descriptor(
  name='GetUsersByAliasPayload',
  full_name='mobile.entity.v1.GetUsersByAliasPayload',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='public_group_member_profile', full_name='mobile.entity.v1.GetUsersByAliasPayload.public_group_member_profile', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='anon_chat_member_profile', full_name='mobile.entity.v1.GetUsersByAliasPayload.anon_chat_member_profile', index=1,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='private_profile', full_name='mobile.entity.v1.GetUsersByAliasPayload.private_profile', index=2,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='id', full_name='mobile.entity.v1.GetUsersByAliasPayload.id', index=3,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=5/cpp_type=1 are the protobuf TYPE_INT32 / CPPTYPE_INT32 constants.
      name='request_index', full_name='mobile.entity.v1.GetUsersByAliasPayload.request_index', index=4,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3727,
  serialized_end=4033,
)
# Auto-generated descriptor for mobile.entity.v1.GetUsersByAliasResponse:
# enum 'result' (field 1) plus repeated 'payloads' (10), 'retriable_ids' (11),
# 'failed_ids' (12), 'not_found_ids' (13) -- same layout as the other
# Get*Response messages in this file.
_GETUSERSBYALIASRESPONSE = _descriptor.Descriptor(
  name='GetUsersByAliasResponse',
  full_name='mobile.entity.v1.GetUsersByAliasResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='mobile.entity.v1.GetUsersByAliasResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='payloads', full_name='mobile.entity.v1.GetUsersByAliasResponse.payloads', index=1,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='retriable_ids', full_name='mobile.entity.v1.GetUsersByAliasResponse.retriable_ids', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='failed_ids', full_name='mobile.entity.v1.GetUsersByAliasResponse.failed_ids', index=3,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_found_ids', full_name='mobile.entity.v1.GetUsersByAliasResponse.not_found_ids', index=4,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETUSERSBYALIASRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4036,
  serialized_end=4380,
)
# ---------------------------------------------------------------------------
# Cross-reference linkage (generated): now that all descriptors exist, resolve
# each field's message_type/enum_type to the concrete descriptor object,
# attach nested enums to their containing messages, and populate the
# RequestedJid oneof. JID/convo-ID types come from the imported common model
# modules; entity and element payload types come from the entity.v1 modules.
# ---------------------------------------------------------------------------
_GETUSERSREQUEST.fields_by_name['ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERSRESPONSE.fields_by_name['result'].enum_type = _GETUSERSRESPONSE_RESULT
_GETUSERSRESPONSE.fields_by_name['users'].message_type = entity_dot_v1_dot_entity__common__pb2._ENTITYUSER
_GETUSERSRESPONSE.fields_by_name['retriable_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERSRESPONSE.fields_by_name['failed_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERSRESPONSE.fields_by_name['not_found_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERSRESPONSE_RESULT.containing_type = _GETUSERSRESPONSE
_GETBOTSREQUEST.fields_by_name['ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETBOTSRESPONSE.fields_by_name['result'].enum_type = _GETBOTSRESPONSE_RESULT
_GETBOTSRESPONSE.fields_by_name['bots'].message_type = entity_dot_v1_dot_entity__common__pb2._ENTITYBOT
_GETBOTSRESPONSE.fields_by_name['retriable_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETBOTSRESPONSE.fields_by_name['failed_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETBOTSRESPONSE.fields_by_name['not_found_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETBOTSRESPONSE_RESULT.containing_type = _GETBOTSRESPONSE
_GETUSERROSTERENTRIESREQUEST.fields_by_name['ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERROSTERENTRIESRESPONSE.fields_by_name['result'].enum_type = _GETUSERROSTERENTRIESRESPONSE_RESULT
_GETUSERROSTERENTRIESRESPONSE.fields_by_name['user_roster_entries'].message_type = entity_dot_v1_dot_entity__common__pb2._ENTITYUSERROSTERENTRY
_GETUSERROSTERENTRIESRESPONSE.fields_by_name['retriable_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERROSTERENTRIESRESPONSE.fields_by_name['failed_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERROSTERENTRIESRESPONSE.fields_by_name['not_found_ids'].message_type = common__model__pb2._XIBAREUSERJID
_GETUSERROSTERENTRIESRESPONSE_RESULT.containing_type = _GETUSERROSTERENTRIESRESPONSE
_GETGROUPSREQUEST.fields_by_name['ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPSRESPONSE.fields_by_name['result'].enum_type = _GETGROUPSRESPONSE_RESULT
_GETGROUPSRESPONSE.fields_by_name['groups'].message_type = entity_dot_v1_dot_entity__common__pb2._ENTITYGROUP
_GETGROUPSRESPONSE.fields_by_name['retriable_ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPSRESPONSE.fields_by_name['failed_ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPSRESPONSE.fields_by_name['not_found_ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPSRESPONSE_RESULT.containing_type = _GETGROUPSRESPONSE
_GETCONVOSREQUEST.fields_by_name['ids'].message_type = common_dot_v1_dot_model__pb2._XICONVOID
_GETCONVOSRESPONSE.fields_by_name['result'].enum_type = _GETCONVOSRESPONSE_RESULT
_GETCONVOSRESPONSE.fields_by_name['convos'].message_type = entity_dot_v1_dot_entity__common__pb2._ENTITYCONVO
_GETCONVOSRESPONSE.fields_by_name['retriable_ids'].message_type = common_dot_v1_dot_model__pb2._XICONVOID
_GETCONVOSRESPONSE.fields_by_name['failed_ids'].message_type = common_dot_v1_dot_model__pb2._XICONVOID
_GETCONVOSRESPONSE.fields_by_name['not_found_ids'].message_type = common_dot_v1_dot_model__pb2._XICONVOID
_GETCONVOSRESPONSE_RESULT.containing_type = _GETCONVOSRESPONSE
_GETTRUSTEDBOTSREQUEST.fields_by_name['page_token'].message_type = _PAGETOKEN
_GETTRUSTEDBOTSRESPONSE.fields_by_name['result'].enum_type = _GETTRUSTEDBOTSRESPONSE_RESULT
_GETTRUSTEDBOTSRESPONSE.fields_by_name['trusted_bots'].message_type = common__model__pb2._XIBAREUSERJID
_GETTRUSTEDBOTSRESPONSE.fields_by_name['page_token'].message_type = _PAGETOKEN
_GETTRUSTEDBOTSRESPONSE_RESULT.containing_type = _GETTRUSTEDBOTSRESPONSE
_GETGROUPROSTERENTRIESREQUEST.fields_by_name['ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPROSTERENTRIESRESPONSE.fields_by_name['result'].enum_type = _GETGROUPROSTERENTRIESRESPONSE_RESULT
_GETGROUPROSTERENTRIESRESPONSE.fields_by_name['group_roster_entries'].message_type = entity_dot_v1_dot_entity__common__pb2._ENTITYGROUPROSTERENTRY
_GETGROUPROSTERENTRIESRESPONSE.fields_by_name['retriable_ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPROSTERENTRIESRESPONSE.fields_by_name['failed_ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPROSTERENTRIESRESPONSE.fields_by_name['not_found_ids'].message_type = common__model__pb2._XIGROUPJID
_GETGROUPROSTERENTRIESRESPONSE_RESULT.containing_type = _GETGROUPROSTERENTRIESRESPONSE
_REQUESTEDJID.fields_by_name['alias_jid'].message_type = common_dot_v1_dot_model__pb2._XIALIASJID
# Place alias_jid inside the jid_type oneof and record the back-reference.
_REQUESTEDJID.oneofs_by_name['jid_type'].fields.append(
  _REQUESTEDJID.fields_by_name['alias_jid'])
_REQUESTEDJID.fields_by_name['alias_jid'].containing_oneof = _REQUESTEDJID.oneofs_by_name['jid_type']
_GETUSERSBYALIASREQUEST.fields_by_name['ids'].message_type = _REQUESTEDJID
_PUBLICGROUPMEMBERPROFILE.fields_by_name['bio_element'].message_type = entity_dot_v1_dot_element__common__pb2._BIOELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['profile_pic'].message_type = entity_dot_v1_dot_element__common__pb2._PROFILEPICELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['background_profile_pic_extension'].message_type = entity_dot_v1_dot_element__common__pb2._BACKGROUNDPROFILEPICEXTENSIONELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['registration_element'].message_type = entity_dot_v1_dot_element__common__pb2._REGISTRATIONELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['interests_element'].message_type = entity_dot_v1_dot_element__common__pb2._INTERESTSELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['emoji_status_element'].message_type = entity_dot_v1_dot_element__common__pb2._EMOJISTATUSELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['display_name'].message_type = entity_dot_v1_dot_element__common__pb2._DISPLAYNAMEELEMENT
_PUBLICGROUPMEMBERPROFILE.fields_by_name['kin_user_id_element'].message_type = entity_dot_v1_dot_element__common__pb2._KINUSERIDELEMENT
_ANONCHATMEMBERPROFILE.fields_by_name['profile_pic'].message_type = entity_dot_v1_dot_element__common__pb2._PROFILEPICELEMENT
_ANONCHATMEMBERPROFILE.fields_by_name['display_name'].message_type = entity_dot_v1_dot_element__common__pb2._DISPLAYNAMEELEMENT
_PRIVATEPROFILE.fields_by_name['id'].message_type = common__model__pb2._XIBAREUSERJID
_PRIVATEPROFILE.fields_by_name['username'].message_type = entity_dot_v1_dot_element__common__pb2._USERNAMEELEMENT
_GETUSERSBYALIASPAYLOAD.fields_by_name['public_group_member_profile'].message_type = _PUBLICGROUPMEMBERPROFILE
_GETUSERSBYALIASPAYLOAD.fields_by_name['anon_chat_member_profile'].message_type = _ANONCHATMEMBERPROFILE
_GETUSERSBYALIASPAYLOAD.fields_by_name['private_profile'].message_type = _PRIVATEPROFILE
_GETUSERSBYALIASPAYLOAD.fields_by_name['id'].message_type = _REQUESTEDJID
_GETUSERSBYALIASRESPONSE.fields_by_name['result'].enum_type = _GETUSERSBYALIASRESPONSE_RESULT
_GETUSERSBYALIASRESPONSE.fields_by_name['payloads'].message_type = _GETUSERSBYALIASPAYLOAD
_GETUSERSBYALIASRESPONSE.fields_by_name['retriable_ids'].message_type = _REQUESTEDJID
_GETUSERSBYALIASRESPONSE.fields_by_name['failed_ids'].message_type = _REQUESTEDJID
_GETUSERSBYALIASRESPONSE.fields_by_name['not_found_ids'].message_type = _REQUESTEDJID
_GETUSERSBYALIASRESPONSE_RESULT.containing_type = _GETUSERSBYALIASRESPONSE
DESCRIPTOR.message_types_by_name['GetUsersRequest'] = _GETUSERSREQUEST
DESCRIPTOR.message_types_by_name['GetUsersResponse'] = _GETUSERSRESPONSE
DESCRIPTOR.message_types_by_name['GetBotsRequest'] = _GETBOTSREQUEST
DESCRIPTOR.message_types_by_name['GetBotsResponse'] = _GETBOTSRESPONSE
DESCRIPTOR.message_types_by_name['GetUserRosterEntriesRequest'] = _GETUSERROSTERENTRIESREQUEST
DESCRIPTOR.message_types_by_name['GetUserRosterEntriesResponse'] = _GETUSERROSTERENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['GetGroupsRequest'] = _GETGROUPSREQUEST
DESCRIPTOR.message_types_by_name['GetGroupsResponse'] = _GETGROUPSRESPONSE
DESCRIPTOR.message_types_by_name['GetConvosRequest'] = _GETCONVOSREQUEST
DESCRIPTOR.message_types_by_name['GetConvosResponse'] = _GETCONVOSRESPONSE
DESCRIPTOR.message_types_by_name['GetTrustedBotsRequest'] = _GETTRUSTEDBOTSREQUEST
DESCRIPTOR.message_types_by_name['GetTrustedBotsResponse'] = _GETTRUSTEDBOTSRESPONSE
DESCRIPTOR.message_types_by_name['PageToken'] = _PAGETOKEN
DESCRIPTOR.message_types_by_name['GetGroupRosterEntriesRequest'] = _GETGROUPROSTERENTRIESREQUEST
DESCRIPTOR.message_types_by_name['GetGroupRosterEntriesResponse'] = _GETGROUPROSTERENTRIESRESPONSE
DESCRIPTOR.message_types_by_name['RequestedJid'] = _REQUESTEDJID
DESCRIPTOR.message_types_by_name['GetUsersByAliasRequest'] = _GETUSERSBYALIASREQUEST
DESCRIPTOR.message_types_by_name['PublicGroupMemberProfile'] = _PUBLICGROUPMEMBERPROFILE
DESCRIPTOR.message_types_by_name['AnonChatMemberProfile'] = _ANONCHATMEMBERPROFILE
DESCRIPTOR.message_types_by_name['PrivateProfile'] = _PRIVATEPROFILE
DESCRIPTOR.message_types_by_name['GetUsersByAliasPayload'] = _GETUSERSBYALIASPAYLOAD
DESCRIPTOR.message_types_by_name['GetUsersByAliasResponse'] = _GETUSERSBYALIASRESPONSE
GetUsersRequest = _reflection.GeneratedProtocolMessageType('GetUsersRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUSERSREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUsersRequest)
))
_sym_db.RegisterMessage(GetUsersRequest)
GetUsersResponse = _reflection.GeneratedProtocolMessageType('GetUsersResponse', (_message.Message,), dict(
DESCRIPTOR = _GETUSERSRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUsersResponse)
))
_sym_db.RegisterMessage(GetUsersResponse)
GetBotsRequest = _reflection.GeneratedProtocolMessageType('GetBotsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETBOTSREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetBotsRequest)
))
_sym_db.RegisterMessage(GetBotsRequest)
GetBotsResponse = _reflection.GeneratedProtocolMessageType('GetBotsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETBOTSRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetBotsResponse)
))
_sym_db.RegisterMessage(GetBotsResponse)
GetUserRosterEntriesRequest = _reflection.GeneratedProtocolMessageType('GetUserRosterEntriesRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUSERROSTERENTRIESREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUserRosterEntriesRequest)
))
_sym_db.RegisterMessage(GetUserRosterEntriesRequest)
GetUserRosterEntriesResponse = _reflection.GeneratedProtocolMessageType('GetUserRosterEntriesResponse', (_message.Message,), dict(
DESCRIPTOR = _GETUSERROSTERENTRIESRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUserRosterEntriesResponse)
))
_sym_db.RegisterMessage(GetUserRosterEntriesResponse)
GetGroupsRequest = _reflection.GeneratedProtocolMessageType('GetGroupsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPSREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetGroupsRequest)
))
_sym_db.RegisterMessage(GetGroupsRequest)
GetGroupsResponse = _reflection.GeneratedProtocolMessageType('GetGroupsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPSRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetGroupsResponse)
))
_sym_db.RegisterMessage(GetGroupsResponse)
GetConvosRequest = _reflection.GeneratedProtocolMessageType('GetConvosRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCONVOSREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetConvosRequest)
))
_sym_db.RegisterMessage(GetConvosRequest)
GetConvosResponse = _reflection.GeneratedProtocolMessageType('GetConvosResponse', (_message.Message,), dict(
DESCRIPTOR = _GETCONVOSRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetConvosResponse)
))
_sym_db.RegisterMessage(GetConvosResponse)
GetTrustedBotsRequest = _reflection.GeneratedProtocolMessageType('GetTrustedBotsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTRUSTEDBOTSREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetTrustedBotsRequest)
))
_sym_db.RegisterMessage(GetTrustedBotsRequest)
GetTrustedBotsResponse = _reflection.GeneratedProtocolMessageType('GetTrustedBotsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETTRUSTEDBOTSRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetTrustedBotsResponse)
))
_sym_db.RegisterMessage(GetTrustedBotsResponse)
PageToken = _reflection.GeneratedProtocolMessageType('PageToken', (_message.Message,), dict(
DESCRIPTOR = _PAGETOKEN,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.PageToken)
))
_sym_db.RegisterMessage(PageToken)
GetGroupRosterEntriesRequest = _reflection.GeneratedProtocolMessageType('GetGroupRosterEntriesRequest', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPROSTERENTRIESREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetGroupRosterEntriesRequest)
))
_sym_db.RegisterMessage(GetGroupRosterEntriesRequest)
GetGroupRosterEntriesResponse = _reflection.GeneratedProtocolMessageType('GetGroupRosterEntriesResponse', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPROSTERENTRIESRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetGroupRosterEntriesResponse)
))
_sym_db.RegisterMessage(GetGroupRosterEntriesResponse)
RequestedJid = _reflection.GeneratedProtocolMessageType('RequestedJid', (_message.Message,), dict(
DESCRIPTOR = _REQUESTEDJID,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.RequestedJid)
))
_sym_db.RegisterMessage(RequestedJid)
GetUsersByAliasRequest = _reflection.GeneratedProtocolMessageType('GetUsersByAliasRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUSERSBYALIASREQUEST,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUsersByAliasRequest)
))
_sym_db.RegisterMessage(GetUsersByAliasRequest)
PublicGroupMemberProfile = _reflection.GeneratedProtocolMessageType('PublicGroupMemberProfile', (_message.Message,), dict(
DESCRIPTOR = _PUBLICGROUPMEMBERPROFILE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.PublicGroupMemberProfile)
))
_sym_db.RegisterMessage(PublicGroupMemberProfile)
AnonChatMemberProfile = _reflection.GeneratedProtocolMessageType('AnonChatMemberProfile', (_message.Message,), dict(
DESCRIPTOR = _ANONCHATMEMBERPROFILE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.AnonChatMemberProfile)
))
_sym_db.RegisterMessage(AnonChatMemberProfile)
PrivateProfile = _reflection.GeneratedProtocolMessageType('PrivateProfile', (_message.Message,), dict(
DESCRIPTOR = _PRIVATEPROFILE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.PrivateProfile)
))
_sym_db.RegisterMessage(PrivateProfile)
GetUsersByAliasPayload = _reflection.GeneratedProtocolMessageType('GetUsersByAliasPayload', (_message.Message,), dict(
DESCRIPTOR = _GETUSERSBYALIASPAYLOAD,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUsersByAliasPayload)
))
_sym_db.RegisterMessage(GetUsersByAliasPayload)
GetUsersByAliasResponse = _reflection.GeneratedProtocolMessageType('GetUsersByAliasResponse', (_message.Message,), dict(
DESCRIPTOR = _GETUSERSBYALIASRESPONSE,
__module__ = 'entity.v1.entity_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.entity.v1.GetUsersByAliasResponse)
))
_sym_db.RegisterMessage(GetUsersByAliasResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.kik.entity.mobileZJgithub.com/kikinteractive/xiphias-api-mobile/generated/go/entity/v1;entity'))
_GETUSERSREQUEST.fields_by_name['ids'].has_options = True
_GETUSERSREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))
_GETBOTSREQUEST.fields_by_name['ids'].has_options = True
_GETBOTSREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))
_GETUSERROSTERENTRIESREQUEST.fields_by_name['ids'].has_options = True
_GETUSERROSTERENTRIESREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))
_GETGROUPSREQUEST.fields_by_name['ids'].has_options = True
_GETGROUPSREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))
_GETCONVOSREQUEST.fields_by_name['ids'].has_options = True
_GETCONVOSREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))
_GETTRUSTEDBOTSRESPONSE.fields_by_name['trusted_bots'].has_options = True
_GETTRUSTEDBOTSRESPONSE.fields_by_name['trusted_bots']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\006\010\000\200\001\210\''))
_GETGROUPROSTERENTRIESREQUEST.fields_by_name['ids'].has_options = True
_GETGROUPROSTERENTRIESREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001\024'))
_GETUSERSBYALIASREQUEST.fields_by_name['ids'].has_options = True
_GETUSERSBYALIASREQUEST.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005\010\000\200\001\024'))
# @@protoc_insertion_point(module_scope)
| mit |
ext/slideshow-frontend | slideshow/lib/queue.py | 1 | 3299 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from slideshow.lib.slide import Slide
from slideshow.settings import Settings
import slideshow.event as event
import cherrypy
def div_id(id):
    """Return the HTML element id for queue *id*.

    Non-negative ids map to 'queue_<id>'; the special id -1 (the
    internal queue) maps to 'queue_int'.

    :raises ValueError: for any id below -1.
    """
    if id >= 0:
        return 'queue_%d' % id
    elif id == -1:
        return 'queue_int'
    else:
        # Parenthesized raise works on both Python 2 and 3; the original
        # `raise ValueError, msg` syntax is Python-2 only.
        raise ValueError('Invalid id for div_id: %d' % id)
class Queue:
    """A named, ordered collection of slides loaded from the database.

    Queues with id <= 1 are built-in and reported as read-only by
    as_json(); id -1 is the internal queue (see div_id).
    """

    def __init__(self, c, id, name, loop):
        # c is a DB-API style cursor; id/name/loop come from a `queue` row.
        self.id = id
        self.div = div_id(id)
        self.name = name
        # `loop` is stored as an integer flag in the DB; normalize to bool.
        self.loop = loop == 1
        # Eagerly load all slides belonging to this queue, in sort order.
        self.slides = [Slide(queue=self, **x) for x in c.execute("""
            SELECT
                `id`,
                DATETIME(`timestamp`) AS `timestamp`,
                `path`,
                `active`,
                `assembler`,
                `data`
            FROM
                `slide`
            WHERE
                `queue_id` = :queue
            ORDER BY
                `sortorder`
        """, {'queue': id}).fetchall()]

    def as_json(self):
        """Return a JSON-serializable dict describing this queue."""
        return {
            'id': self.id,
            'name': self.name,
            'loop': self.loop,
            # Built-in queues (ids 0 and 1) may not be edited by clients.
            'readonly': self.id <= 1,
            'slides': [x.as_json() for x in self.slides]
        }

    def __len__(self):
        # Number of slides currently held by this queue.
        return len(self.slides)

    def rename(self, c, name):
        """Persist a new display name for this queue and update in place."""
        c.execute("""
            UPDATE
                `queue`
            SET
                `name` = :name
            WHERE
                `id` = :id
        """, dict(id=self.id, name=name))
        self.name = name
def all(c):
    """Return every queue in the database as a list of Queue objects.

    NOTE(review): this module-level function shadows the builtin ``all``
    inside this module; callers are expected to use it as ``queue.all(c)``.
    """
    return [Queue(c, **x) for x in c.execute("""
        SELECT
            `id`,
            `name`,
            `loop`
        FROM
            `queue`
    """).fetchall()]
def from_id(c, id):
    """Fetch a single queue by primary key, or None when no row matches."""
    query = """
        SELECT
            `id`,
            `name`,
            `loop`
        FROM
            `queue`
        WHERE
            `id` = :id
        LIMIT 1
    """
    record = c.execute(query, dict(id=id)).fetchone()
    return Queue(c, **record) if record is not None else None
def add(c, name):
    """Create a new queue named *name* and return it as a Queue object.

    If this is the first user-created queue (only the built-in queues
    existed before, making the total three), it also becomes the active
    queue.
    """
    c.execute("""
        INSERT INTO `queue` (
            `name`
        ) VALUES (
            :name
        )
    """, dict(name=name))
    row_id = c.last_row_id()
    n = int(c.execute("SELECT COUNT(*) as `count` FROM `queue`").fetchone()['count'])

    # if no previous queue (except default) existed, make this the active
    # (removed stray Python-2 debug statement `print 'derp'` left in here)
    if n == 3:
        activate(row_id)

    return from_id(c, row_id)
def delete(c, id):
    """Delete queue *id*, re-homing its slides to the unsorted queue (0).

    Built-in queues (id <= 0) cannot be deleted.  Returns False in that
    case, True after a successful delete.
    """
    deletable = id > 0
    if deletable:
        # Move the slides first so none are orphaned by the delete.
        c.execute("""
            UPDATE
                `slide`
            SET
                `queue_id` = 0
            WHERE
                `queue_id` = :id
        """, dict(id=id))
        c.execute("""
            DELETE FROM
                `queue`
            WHERE
                `id` = :id
        """, dict(id=id))
    return deletable
def activate(id):
    """Make queue *id* the active (currently displayed) queue.

    Persists the choice in the 'Runtime.queue' setting and notifies
    listeners through the 'config.queue_changed' event.
    """
    settings = Settings()
    with settings:
        settings['Runtime.queue'] = id
        settings.persist()
    event.trigger('config.queue_changed', id)
def set_loop(id, state):
    """Persist the loop flag for queue *id* and broadcast the change."""
    # Per-thread DB connection installed by the cherrypy setup.
    c = cherrypy.thread_data.db
    c.execute("""
        UPDATE
            `queue`
        SET
            `loop` = :state
        WHERE
            `id` = :id
    """, dict(id=id, state=state))
    c.commit()

    # to force reloading of queue settings
    event.trigger('config.queue_changed', id)
| agpl-3.0 |
marklar/fowles | py/ids_ventilator.py | 1 | 1430 | import re
import time
import zmq
#
# The input file is expected to have on each line:
# 1. a video_id
# 2. a tab
# 3. a channel_id
# The provided file (below: INPUT_FILE) matches that format.
#
# cfg
INPUT_FILE = "py/10_pairs_of_vid_and_chan_ids.txt"
PORT = 5557
# globals
context = None
pusher = None
def mk_addr(p):
    """Build the ZMQ bind address for TCP port *p* on all interfaces."""
    return "tcp://*:{0}".format(p)
def get_pusher():
    """Return the process-wide PUSH socket, lazily creating and binding it."""
    global context, pusher
    if pusher is None:
        context = zmq.Context()
        pusher = context.socket(zmq.PUSH)
        pusher.bind( mk_addr(PORT) )
    return pusher
def submit_json(msg):
    """Serialize *msg* as JSON and push it to any connected worker."""
    get_pusher().send_json(msg)
def get_lines(fname):
    """Yield the lines of *fname* with surrounding newlines stripped."""
    with open(fname) as handle:
        # readlines() materializes the file eagerly, so the generator
        # below remains valid after the file is closed.
        all_lines = handle.readlines()
    return (text.strip('\n') for text in all_lines)
def send(msg):
    """Log *msg* to stdout and submit it to the ventilator socket."""
    # print() with a single argument is valid on both Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print("sending: %s" % (msg,))
    submit_json(msg)
def videos(vid_id):
    """Queue a 'videos' API request for the given video id."""
    send({'request': 'videos',
          'id': vid_id})
def channels(chan_id):
    """Queue a 'channels' API request for the given channel id."""
    send({'request': 'channels',
          'id': chan_id})
def activities(chan_id):
    """Queue an 'activities' API request for the given channel id."""
    send({'request': 'activities',
          'channelId': chan_id})
def playlistItems(chan_id):
    """Queue a 'playlistItems' request for a channel's uploads playlist."""
    # YouTube convention: a channel's uploads playlist id is its channel
    # id with the leading 'UC' replaced by 'UU'.
    playlist_id = re.sub('^UC', 'UU', chan_id)
    send({'request': 'playlistItems',
          'playlistId': playlist_id})
def push_video_ids(fname):
    """Read video/channel id pairs from *fname* and fan out all request types.

    Each input line is expected to contain a video id and a channel id
    separated by whitespace (a tab in the provided input file).
    """
    for ln in get_lines(fname):
        [vid_id, chan_id] = ln.split()
        videos(vid_id)
        channels(chan_id)
        activities(chan_id)
        playlistItems(chan_id)
if __name__ == "__main__":
    # Only push when run as a script, not when this module is imported.
    push_video_ids(INPUT_FILE)
| mit |
mitchrule/Miscellaneous | Django_Project/django/Lib/site-packages/django/core/cache/backends/memcached.py | 82 | 7200 | "Memcached cache backend"
import pickle
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango19Warning, RenameMethodsBase,
)
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCacheMethods(RenameMethodsBase):
    """Metaclass that keeps the deprecated ``_get_memcache_timeout`` name
    working (with a deprecation warning) until its removal in Django 1.9."""
    renamed_methods = (
        ('_get_memcache_timeout', 'get_backend_timeout', RemovedInDjango19Warning),
    )
class BaseMemcachedCache(six.with_metaclass(BaseMemcachedCacheMethods, BaseCache)):
    """Shared implementation for memcached-based cache backends.

    Concrete subclasses supply the client *library* (python-memcached or
    pylibmc) and that library's key-not-found exception type.
    """

    def __init__(self, server, params, library, value_not_found_exception):
        # *server* may be a single 'host:port;host:port' string or a list.
        super(BaseMemcachedCache, self).__init__(params)
        if isinstance(server, six.string_types):
            self._servers = server.split(';')
        else:
            self._servers = server

        # The exception type to catch from the underlying library for a key
        # that was not found. This is a ValueError for python-memcache,
        # pylibmc.NotFound for pylibmc, and cmemcache will return None without
        # raising an exception.
        self.LibraryValueNotFoundException = value_not_found_exception

        self._lib = library
        self._options = params.get('OPTIONS', None)

    @property
    def _cache(self):
        """
        Implements transparent thread-safe access to a memcached client.
        """
        if getattr(self, '_client', None) is None:
            self._client = self._lib.Client(self._servers)

        return self._client

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Memcached deals with long (> 30 days) timeouts in a special
        way. Call this function to obtain a safe value for your timeout.
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout

        if timeout is None:
            # Using 0 in memcache sets a non-expiring timeout.
            return 0
        elif int(timeout) == 0:
            # Other cache backends treat 0 as set-and-expire. To achieve this
            # in memcache backends, a negative timeout must be passed.
            timeout = -1

        if timeout > 2592000:  # 60*60*24*30, 30 days
            # See http://code.google.com/p/memcached/wiki/FAQ
            # "You can set expire times up to 30 days in the future. After that
            # memcached interprets it as a date, and will expire the item after
            # said date. This is a simple (but obscure) mechanic."
            #
            # This means that we have to switch to absolute timestamps.
            timeout += int(time.time())
        return int(timeout)

    def make_key(self, key, version=None):
        # Python 2 memcache requires the key to be a byte string.
        return force_str(super(BaseMemcachedCache, self).make_key(key, version))

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store only if the key does not already exist; True on success."""
        key = self.make_key(key, version=version)
        return self._cache.add(key, value, self.get_backend_timeout(timeout))

    def get(self, key, default=None, version=None):
        """Fetch a key, returning *default* on a miss."""
        key = self.make_key(key, version=version)
        val = self._cache.get(key)
        if val is None:
            return default
        return val

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Unconditionally store *value* under *key*."""
        key = self.make_key(key, version=version)
        if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
            # make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
            self._cache.delete(key)

    def delete(self, key, version=None):
        """Remove *key*; silently ignores missing keys."""
        key = self.make_key(key, version=version)
        self._cache.delete(key)

    def get_many(self, keys, version=None):
        """Fetch several keys in one round-trip, keyed by the caller's names."""
        new_keys = [self.make_key(x, version=version) for x in keys]
        ret = self._cache.get_multi(new_keys)
        if ret:
            _ = {}
            # Translate the versioned/prefixed keys back to the keys the
            # caller originally passed in.
            m = dict(zip(new_keys, keys))
            for k, v in ret.items():
                _[m[k]] = v
            ret = _
        return ret

    def close(self, **kwargs):
        # Drop all memcached connections (called at end of request).
        self._cache.disconnect_all()

    def incr(self, key, delta=1, version=None):
        """Atomically increment; raises ValueError when the key is missing."""
        key = self.make_key(key, version=version)
        # memcached doesn't support a negative delta
        if delta < 0:
            return self._cache.decr(key, -delta)
        try:
            val = self._cache.incr(key, delta)

        # python-memcache responds to incr on non-existent keys by
        # raising a ValueError, pylibmc by raising a pylibmc.NotFound
        # and Cmemcache returns None. In all cases,
        # we should raise a ValueError though.
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def decr(self, key, delta=1, version=None):
        """Atomically decrement; raises ValueError when the key is missing."""
        key = self.make_key(key, version=version)
        # memcached doesn't support a negative delta
        if delta < 0:
            return self._cache.incr(key, -delta)
        try:
            val = self._cache.decr(key, delta)

        # python-memcache responds to incr on non-existent keys by
        # raising a ValueError, pylibmc by raising a pylibmc.NotFound
        # and Cmemcache returns None. In all cases,
        # we should raise a ValueError though.
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """Store several key/value pairs with one network round-trip."""
        safe_data = {}
        for key, value in data.items():
            key = self.make_key(key, version=version)
            safe_data[key] = value
        self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))

    def delete_many(self, keys, version=None):
        """Remove several keys with one network round-trip."""
        l = lambda x: self.make_key(x, version=version)
        self._cache.delete_multi(map(l, keys))

    def clear(self):
        # Flush the entire cache on every configured server.
        self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
    "An implementation of a cache binding using python-memcached"

    def __init__(self, server, params):
        # Imported lazily so the dependency is only required when this
        # backend is actually configured.
        import memcache
        super(MemcachedCache, self).__init__(server, params,
                                             library=memcache,
                                             value_not_found_exception=ValueError)

    @property
    def _cache(self):
        # python-memcached defaults to pickle protocol 0; request the
        # highest protocol for faster and smaller payloads.
        if getattr(self, '_client', None) is None:
            self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL)
        return self._client
class PyLibMCCache(BaseMemcachedCache):
    "An implementation of a cache binding using pylibmc"

    def __init__(self, server, params):
        # Imported lazily so the dependency is only required when this
        # backend is actually configured.
        import pylibmc
        super(PyLibMCCache, self).__init__(server, params,
                                           library=pylibmc,
                                           value_not_found_exception=pylibmc.NotFound)

    @cached_property
    def _cache(self):
        # cached_property: the client is created once per backend instance.
        client = self._lib.Client(self._servers)
        if self._options:
            # The backend's OPTIONS map directly onto pylibmc behaviors.
            client.behaviors = self._options

        return client
| mit |
zenmanenergy/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py | 426 | 2174 | """The match_hostname() function from Python 3.2, essential when using SSL."""
import re
__version__ = '3.2.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_to_pat(dn):
pats = []
for frag in dn.split(r'.'):
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
    are mostly followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    dnsnames = []
    # subjectAltName dNSName entries are checked first; the subject
    # commonName is only consulted when no dNSName entry exists at all.
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_to_pat(value).match(hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_to_pat(value).match(hostname):
                        return
                    dnsnames.append(value)
    # No candidate matched: report every name that was tried.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
| lgpl-2.1 |
weiting-chen/manila | manila/network/linux/interface.py | 4 | 7230 | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools

import netaddr
from oslo_config import cfg
from oslo_log import log
import six

from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila.i18n import _LW
from manila.network.linux import ip_lib
from manila.network.linux import ovs_lib
from manila import utils
LOG = log.getLogger(__name__)

# Configuration options contributed by this module; registered on the
# global CONF object below.
OPTS = [
    cfg.StrOpt('ovs_integration_bridge',
               default='br-int',
               help=_('Name of Open vSwitch bridge to use.')),
]

CONF = cfg.CONF
CONF.register_opts(OPTS)
def device_name_synchronized(f):
    """Wraps methods with interprocess locks by device names.

    The decorated method must take the device name as its first
    positional argument (after ``self``); that name is used to build the
    lock name, so concurrent operations on the same device serialize
    across processes while different devices proceed in parallel.
    """
    # functools.wraps preserves the decorated method's name/docstring,
    # which the original decorator silently discarded.
    @functools.wraps(f)
    def wrapped_func(self, *args, **kwargs):
        device_name = "device_name_%s" % args[0]

        @utils.synchronized("linux_interface_%s" % device_name, external=True)
        def source_func(self, *args, **kwargs):
            return f(self, *args, **kwargs)

        return source_func(self, *args, **kwargs)
    return wrapped_func
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
    """Abstract base for drivers that plug/unplug Linux network interfaces."""

    # from linux IF_NAMESIZE
    DEV_NAME_LEN = 14
    DEV_NAME_PREFIX = 'tap'

    def __init__(self):
        self.conf = CONF

    @device_name_synchronized
    def init_l3(self, device_name, ip_cidrs, namespace=None):
        """Set the L3 settings for the interface using data from the port.

        ip_cidrs: list of 'X.X.X.X/YY' strings
        """
        device = ip_lib.IPDevice(device_name,
                                 namespace=namespace)

        # Snapshot the addresses currently configured on the device so the
        # desired state can be applied as a delta (add missing, drop stale).
        previous = {}
        for address in device.addr.list(scope='global', filters=['permanent']):
            previous[address['cidr']] = address['ip_version']

        # add new addresses
        for ip_cidr in ip_cidrs:
            net = netaddr.IPNetwork(ip_cidr)
            if ip_cidr in previous:
                # Already configured; keep it and don't delete it below.
                del previous[ip_cidr]
                continue

            device.addr.add(net.version, ip_cidr, str(net.broadcast))

        # clean up any old addresses
        for ip_cidr, ip_version in previous.items():
            device.addr.delete(ip_version, ip_cidr)

    def check_bridge_exists(self, bridge):
        """Raise BridgeDoesNotExist unless *bridge* is present on the host."""
        if not ip_lib.device_exists(bridge):
            raise exception.BridgeDoesNotExist(bridge=bridge)

    def get_device_name(self, port):
        # Device names are capped at IF_NAMESIZE (14) characters.
        return (self.DEV_NAME_PREFIX + port['id'])[:self.DEV_NAME_LEN]

    @abc.abstractmethod
    def plug(self, device_name, port_id, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""

    @abc.abstractmethod
    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
class OVSInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating an internal interface on an OVS bridge."""

    DEV_NAME_PREFIX = 'tap'

    def _get_tap_name(self, dev_name):
        # OVS internal ports use the device name directly.
        return dev_name

    def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
                      internal=True):
        """Add a port to *bridge*, tagging it with the Neutron external-ids."""
        cmd = ['ovs-vsctl', '--', '--may-exist',
               'add-port', bridge, device_name]
        if internal:
            cmd += ['--', 'set', 'Interface', device_name, 'type=internal']
        cmd += ['--', 'set', 'Interface', device_name,
                'external-ids:iface-id=%s' % port_id,
                '--', 'set', 'Interface', device_name,
                'external-ids:iface-status=active',
                '--', 'set', 'Interface', device_name,
                'external-ids:attached-mac=%s' % mac_address]
        utils.execute(*cmd, run_as_root=True)

    @device_name_synchronized
    def plug(self, device_name, port_id, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        self.check_bridge_exists(bridge)

        ip = ip_lib.IPWrapper()
        ns_dev = ip.device(device_name)

        if not ip_lib.device_exists(device_name,
                                    namespace=namespace):
            tap_name = self._get_tap_name(device_name)

            self._ovs_add_port(bridge, tap_name, port_id, mac_address)

            ns_dev.link.set_address(mac_address)

            # Add an interface created by ovs to the namespace.
            if namespace:
                namespace_obj = ip.ensure_namespace(namespace)
                namespace_obj.add_device_to_namespace(ns_dev)
        else:
            # Re-plugging an existing device is treated as a no-op;
            # the link is still brought up below.
            LOG.warn(_LW("Device %s already exists"), device_name)
        ns_dev.link.set_up()

    @device_name_synchronized
    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        tap_name = self._get_tap_name(device_name)
        self.check_bridge_exists(bridge)
        ovs = ovs_lib.OVSBridge(bridge)

        try:
            ovs.delete_port(tap_name)
        except RuntimeError:
            # Best-effort teardown: log and continue rather than raise.
            LOG.error(_LE("Failed unplugging interface '%s'"),
                      device_name)
class BridgeInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating bridge interfaces."""

    DEV_NAME_PREFIX = 'ns-'

    @device_name_synchronized
    def plug(self, device_name, port_id, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plugin the interface."""
        ip = ip_lib.IPWrapper()

        # Derive the root-side veth peer name from the namespace-side name
        # by swapping the prefix for 'tap'.
        if prefix:
            tap_name = device_name.replace(prefix, 'tap')
        else:
            tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap')

        if not ip_lib.device_exists(device_name,
                                    namespace=namespace):
            # Create ns_veth in a namespace if one is configured.
            root_veth, ns_veth = ip.add_veth(tap_name, device_name,
                                             namespace2=namespace)
            ns_veth.link.set_address(mac_address)
        else:
            ns_veth = ip.device(device_name)
            root_veth = ip.device(tap_name)
            LOG.warn(_LW("Device %s already exists"), device_name)

        root_veth.link.set_up()
        ns_veth.link.set_up()

    @device_name_synchronized
    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        device = ip_lib.IPDevice(device_name, namespace)
        try:
            device.link.delete()
            LOG.debug("Unplugged interface '%s'", device_name)
        except RuntimeError:
            # Best-effort teardown: log and continue rather than raise.
            LOG.error(_LE("Failed unplugging interface '%s'"),
                      device_name)
| apache-2.0 |
groschovskiy/lerigos_music | Server/API/lib/Crypto/SelfTest/PublicKey/__init__.py | 114 | 1842 | # -*- coding: utf-8 -*-
#
# SelfTest/PublicKey/__init__.py: Self-test for public key crypto
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for public-key crypto"""
__revision__ = "$Id$"
import os
def get_tests(config={}):
    """Return the combined list of public-key self-tests.

    The submodule import order matches the historical one (DSA, RSA,
    importKey, ElGamal), as does the order of the collected tests.
    """
    from Crypto.SelfTest.PublicKey import test_DSA
    from Crypto.SelfTest.PublicKey import test_RSA
    from Crypto.SelfTest.PublicKey import test_importKey
    from Crypto.SelfTest.PublicKey import test_ElGamal

    tests = []
    for module in (test_DSA, test_RSA, test_importKey, test_ElGamal):
        tests.extend(module.get_tests(config=config))
    return tests
if __name__ == '__main__':
    import unittest

    # unittest.main resolves 'suite' by name in this module, so a plain
    # function behaves exactly like the original lambda assignment.
    def suite():
        return unittest.TestSuite(get_tests())

    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
yaojingwu1992/XlsxWriter | xlsxwriter/test/comparison/test_chart_axis03.py | 8 | 2245 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_axis03.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'

        # Paths of the file we generate and the Excel-authored reference.
        self.got_filename = '%s_test_%s' % (test_dir, filename)
        self.exp_filename = '%sxlsx_files/%s' % (test_dir, filename)

        self.ignore_files = []
        self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'stock'})

        date_format = workbook.add_format({'num_format': 14})

        chart.axis_ids = [65514112, 65556864]

        # Column 0 holds serial dates; columns 1-3 hold the series values.
        columns = [
            [39083, 39084, 39085, 39086, 39087],
            [27.2, 25.03, 19.05, 20.34, 18.5],
            [23.49, 19.55, 15.12, 17.84, 16.34],
            [25.45, 23.05, 17.32, 20.45, 17.34],
        ]

        for row in range(5):
            for col, values in enumerate(columns):
                cell_format = date_format if col == 0 else None
                worksheet.write(row, col, values[row], cell_format)

        worksheet.set_column('A:D', 11)

        # One series per value column, all sharing the date categories.
        for col in ('B', 'C', 'D'):
            chart.add_series({
                'categories': '=Sheet1!$A$1:$A$5',
                'values': '=Sheet1!$%s$1:$%s$5' % (col, col),
            })

        chart.set_title({'name': 'Title'})
        chart.set_x_axis({'name': 'XXX'})
        chart.set_y_axis({'name': 'YYY'})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
mcollins12321/anita | venv/lib/python2.7/site-packages/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ration = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR
# Typical distribution ratio for GB2312 text — about 25% of the ideal
# 3.79 ratio derived in the comments above.  Presumably consumed by the
# char-distribution analyser as its confidence threshold; confirm against
# the analyser that imports this module.
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9

# Number of leading entries in GB2312CharToFreqOrder that matter for
# detection; the table itself marks its trailing section as being "of no
# interest for detection purpose".
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| mit |
iabdalkader/micropython | tests/perf_bench/bm_fft.py | 15 | 2107 | # Copyright (c) 2019 Project Nayuki. (MIT License)
# https://www.nayuki.io/page/free-small-fft-in-multiple-languages
import math, cmath
def transform_radix2(vector, inverse):
    """Compute the radix-2 FFT (or, if `inverse`, the unnormalised inverse
    FFT) of `vector`, whose length must be a power of two.

    Returns a new list; the input is not modified. The inverse transform
    is scaled by len(vector) (no 1/n normalisation).
    """

    def bit_reversed(x, bits):
        # Integer whose value is the reverse of the lowest `bits` bits of x.
        r = 0
        for _ in range(bits):
            r = (r << 1) | (x & 1)
            x >>= 1
        return r

    n = len(vector)
    levels = int(math.log2(n))
    # Twiddle factors e^(±2πik/n); sign selects forward vs inverse.
    coef = (2 if inverse else -2) * cmath.pi / n
    exptable = [cmath.rect(1, k * coef) for k in range(n // 2)]
    # Work on a copy in bit-reversed order so butterflies run in place.
    out = [vector[bit_reversed(i, levels)] for i in range(n)]

    # Radix-2 decimation-in-time butterflies, doubling the span each pass.
    size = 2
    while size <= n:
        halfsize = size // 2
        tablestep = n // size
        for start in range(0, n, size):
            k = 0
            for j in range(start, start + halfsize):
                temp = out[j + halfsize] * exptable[k]
                out[j + halfsize] = out[j] - temp
                out[j] += temp
                k += tablestep
        size *= 2
    return out
###########################################################################
# Benchmark interface
# Benchmark parameter table: maps a harness selector key to
# (num_iterations, fft_size). The key semantics are defined by the
# benchmark runner that imports this module — presumably a
# (target-runtime, scale) pair; confirm against the harness.
bm_params = {
    (50, 25): (2, 128),
    (100, 100): (3, 256),
    (1000, 1000): (20, 512),
    (5000, 1000): (100, 512),
}
def bm_setup(params):
    """Set up the FFT benchmark.

    `params` is (num_iterations, signal_length); returns the (run, result)
    pair of callables expected by the benchmark interface.

    Change: the unused local `state = None` has been removed.
    """
    nruns, length = params
    # Real cosine test signal, stored as complex for the FFT.
    signal = [math.cos(2 * math.pi * i / length) + 0j for i in range(length)]
    fft = None
    fft_inv = None

    def run():
        # Repeatedly do a forward transform followed by its inverse.
        nonlocal fft, fft_inv
        for _ in range(nruns):
            fft = transform_radix2(signal, False)
            fft_inv = transform_radix2(fft, True)

    def result():
        # Validate the last pair of transforms.
        nonlocal fft, fft_inv
        # The cosine's spectrum has peaks of magnitude n/2 in bins 1 and -1;
        # subtract them so a correct FFT leaves only values near zero.
        fft[1] -= 0.5 * length
        fft[-1] -= 0.5 * length
        fft_ok = all(abs(f) < 1e-3 for f in fft)
        # The inverse transform is unnormalised, i.e. returns n * signal.
        for i in range(len(fft_inv)):
            fft_inv[i] -= length * signal[i]
        fft_inv_ok = all(abs(f) < 1e-3 for f in fft_inv)
        return nruns * length, (fft_ok, fft_inv_ok)

    return run, result
| mit |
StempG/jstorm | bin/jstorm.py | 19 | 16272 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
import os
import sys
import random
import subprocess as sub
import getopt
def identity(x):
    """Return `x` unchanged — the no-op path normaliser used off cygwin."""
    return x
def cygpath(x):
    # Convert a POSIX-style classpath to Windows form via the `cygpath -wp`
    # helper (used when running under cygwin).
    # NOTE(review): under Python 3 `output` would be bytes and the split
    # below would fail — this script is Python 2 only.
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command,stdout=sub.PIPE)
    output, errors = p.communicate()
    lines = output.split("\n")
    # First line of cygpath's stdout is the converted path.
    return lines[0]
# Pick the classpath normaliser: cygwin needs Windows-style paths.
if sys.platform == "cygwin":
    normclasspath = cygpath
else:
    normclasspath = identity
# Client-side storm.yaml chosen with --config ("" means use the default).
CLIENT_CONF_FILE = ""
# Installation root: two directory levels above this script (bin/jstorm.py).
JSTORM_DIR = "/".join(os.path.realpath( __file__ ).split("/")[:-2])
JSTORM_CONF_DIR = os.getenv("JSTORM_CONF_DIR", JSTORM_DIR + "/conf" )
LOGBACK_CONF = JSTORM_CONF_DIR + "/jstorm.logback.xml"
# Accumulators filled in by command-line option parsing below.
CONFIG_OPTS = []
EXCLUDE_JARS = []
INCLUDE_JARS = []
# Exit status of the last exec_storm_class invocation; returned by main().
STATUS = 0
def check_java():
    """Exit with status -1 unless a `java` executable is found on PATH."""
    if os.system('which java') != 0:
        print("Failed to find java, please add java to PATH")
        sys.exit(-1)
def get_config_opts():
    """Render the collected -c options as one -Dstorm.options JVM flag.

    Spaces are escaped as %%%% so the whole thing stays a single token
    on the java command line.
    """
    joined = ','.join(CONFIG_OPTS).replace(' ', "%%%%")
    return "-Dstorm.options=" + joined
def get_client_childopts():
    # JVM options for client-side commands: console logging config plus,
    # when --config was given, an explicit storm.conf.file override.
    ret = (" -Dstorm.root.logger=INFO,stdout -Dlogback.configurationFile=" + JSTORM_DIR +
           "/conf/client_logback.xml -Dlog4j.configuration=File:" + JSTORM_DIR +
           "/conf/client_log4j.properties")
    if CLIENT_CONF_FILE != "":
        ret += (" -Dstorm.conf.file=" + CLIENT_CONF_FILE)
    return ret
def get_server_childopts(log_name):
    # JVM options for daemon processes: the per-daemon log file name plus
    # the shared logback configuration.
    ret = (" -Dlogfile.name=%s -Dlogback.configurationFile=%s" %(log_name, LOGBACK_CONF))
    return ret
# Refuse to run from a source checkout: the classpath logic expects the
# jar layout of a binary release, marked by the RELEASE file.
if not os.path.exists(JSTORM_DIR + "/RELEASE"):
    print "******************************************"
    print "The jstorm client can only be run from within a release. You appear to be trying to run the client from a checkout of JStorm's source code."
    print "\nYou can download a JStorm release "
    print "******************************************"
    sys.exit(1)
def get_jars_full(adir):
    # Return full paths of the .jar files directly under `adir`, skipping
    # any whose name contains one of the EXCLUDE_JARS substrings.
    #
    # Fixes: replaced the non-idiomatic `== False` / `== True` comparisons
    # and the local variable that shadowed the builtin `filter`; the
    # single-argument print form works in both Python 2 and 3.
    ret = []
    for f in os.listdir(adir):
        if not f.endswith(".jar"):
            continue
        # Substring match against the --exclude-jars list.
        if any(f.find(exclude_jar) >= 0 for exclude_jar in EXCLUDE_JARS):
            print("Don't add " + f + " to classpath")
        else:
            ret.append(adir + "/" + f)
    return ret
def get_classpath(extrajars):
    """Build the JVM classpath string: `extrajars` first, then every jar
    under JSTORM_DIR and JSTORM_DIR/lib, then the --include-jars list."""
    entries = list(extrajars)
    entries.extend(get_jars_full(JSTORM_DIR))
    entries.extend(get_jars_full(JSTORM_DIR + "/lib"))
    entries.extend(INCLUDE_JARS)
    return normclasspath(":".join(entries))
def confvalue(name, extrapaths):
    # Query a single config value by running the JVM helper
    # backtype.storm.command.config_value and parsing its "VALUE: ..." line.
    command = [
        "java", "-client", "-Xms256m", "-Xmx256m", get_config_opts(), "-cp", get_classpath(extrapaths), "backtype.storm.command.config_value", name
    ]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    lines = output.split("\n")
    for line in lines:
        tokens = line.split(" ")
        if tokens[0] == "VALUE:":
            return " ".join(tokens[1:])
    # Fall-through: no VALUE line found; prints diagnostics and implicitly
    # returns None.
    print "Failed to get config " + name
    print errors
    print output
# NOTE: the docstrings of the command handlers below are printed verbatim
# by `jstorm help <command>` (see print_usage), so they are user-visible.
def print_localconfvalue(name):
    """Syntax: [jstorm localconfvalue conf-name]
    Prints out the value for conf-name in the local JStorm configs.
    The local JStorm configs are the ones in ~/.jstorm/storm.yaml merged
    in with the configs in defaults.yaml.
    """
    print name + ": " + confvalue(name, [JSTORM_CONF_DIR])
def print_remoteconfvalue(name):
    """Syntax: [jstorm remoteconfvalue conf-name]
    Prints out the value for conf-name in the cluster's JStorm configs.
    The cluster's JStorm configs are the ones in $STORM-PATH/conf/storm.yaml
    merged in with the configs in defaults.yaml.
    This command must be run on a cluster machine.
    """
    # Same lookup path as print_localconfvalue in this implementation.
    print name + ": " + confvalue(name, [JSTORM_CONF_DIR])
def exec_storm_class(klass, jvmtype="-server", childopts="", extrajars=None, args=None):
    # Launch `klass` in a fresh JVM with the jstorm classpath and record
    # the subprocess exit code in the global STATUS (read by main()).
    #
    # Fix: the mutable default arguments `extrajars=[]` / `args=[]` were
    # replaced with None sentinels; every caller passes them explicitly,
    # so behavior is unchanged.
    if extrajars is None:
        extrajars = []
    if args is None:
        args = []
    nativepath = confvalue("java.library.path", extrajars)
    # Quote every program argument for the shell command line.
    args_str = " ".join(map(lambda s: "\"" + s + "\"", args))
    command = "java " + jvmtype + " -Djstorm.home=" + JSTORM_DIR + " " + get_config_opts() + " -Djava.library.path=" + nativepath + " " + childopts + " -cp " + get_classpath(extrajars) + " " + klass + " " + args_str
    print("Running: " + command)
    global STATUS
    STATUS = os.system(command)
def jar(jarfile, klass, *args):
    """Syntax: [jstorm jar topology-jar-path class ...]
    Runs the main method of class with the specified arguments.
    The jstorm jars and configs in $JSTORM_CONF_DIR/storm.yaml are put on the classpath.
    The process is configured so that StormSubmitter
    (https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
    will upload the jar at topology-jar-path when the topology is submitted.
    """
    # -Dstorm.jar tells StormSubmitter which jar to upload.
    childopts = "-Dstorm.jar=" + jarfile + get_client_childopts()
    exec_storm_class(
        klass,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[jarfile, JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        args=args,
        childopts=childopts)
def zktool(*args):
    """Syntax: [jstorm zktool command ...]
    Runs JStorm's ZooKeeper utility (com.alibaba.jstorm.zk.ZkTool) with
    the specified arguments, e.g. to inspect cluster state stored in zk.
    """
    # Fix: the previous docstring was copy-pasted from `jar` and described
    # the wrong command; `jstorm help zktool` now prints the right syntax.
    childopts = get_client_childopts()
    exec_storm_class(
        "com.alibaba.jstorm.zk.ZkTool",
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[ JSTORM_CONF_DIR, CLIENT_CONF_FILE],
        args=args,
        childopts=childopts)
def kill(*args):
    """Syntax: [jstorm kill topology-name [wait-time-secs]]
    Kills the topology with the name topology-name. JStorm will
    first deactivate the topology's spouts for the duration of
    the topology's message timeout to allow all messages currently
    being processed to finish processing. JStorm will then shutdown
    the workers and clean up their state. You can override the length
    of time JStorm waits between deactivation and shutdown.
    """
    # Thin wrapper: delegates to the JVM-side kill_topology command.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.kill_topology",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def activate(*args):
    """Syntax: [jstorm activate topology-name]
    Activates the specified topology's spouts.
    """
    # Thin wrapper: delegates to the JVM-side activate command.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.activate",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def deactivate(*args):
    """Syntax: [jstorm deactivate topology-name]
    Deactivates the specified topology's spouts.
    """
    # Thin wrapper: delegates to the JVM-side deactivate command.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.deactivate",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def rebalance(*args):
    """Syntax: [jstorm rebalance topology-name [-w wait-time-secs]]
    Sometimes you may wish to spread out where the workers for a topology
    are running. For example, let's say you have a 10 node cluster running
    4 workers per node, and then let's say you add another 10 nodes to
    the cluster. You may wish to have JStorm spread out the workers for the
    running topology so that each node runs 2 workers. One way to do this
    is to kill the topology and resubmit it, but JStorm provides a "rebalance"
    command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout and then redistribute
    the workers evenly around the cluster. The topology will then return to
    its previous state of activation (so a deactivated topology will still
    be deactivated and an activated topology will go back to being activated).
    """
    # Thin wrapper: delegates to the JVM-side rebalance command.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.rebalance",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def restart(*args):
    """Syntax: [jstorm restart topology-name [conf]]
    """
    # Thin wrapper: delegates to the JVM-side restart command.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.restart",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def update_config(*args):
    """Syntax: [jstorm update_config topology-name [conf]]
    Updates the configuration of a running topology.
    """
    # Fix: the previous docstring said "jstorm restart" (copy-paste error);
    # `jstorm help update_config` now prints the right syntax.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.update_config",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def nimbus():
    """Syntax: [jstorm nimbus]
    Launches the nimbus daemon. This command should be run under
    supervision with a tool like daemontools or monit.
    See Setting up a JStorm cluster for more information.
    (https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
    """
    cppaths = [JSTORM_CONF_DIR]
    # nimbus.classpath lets deployments add extra jars to the daemon only.
    nimbus_classpath = confvalue("nimbus.classpath", cppaths)
    childopts = confvalue("nimbus.childopts", cppaths) + get_server_childopts("nimbus.log")
    exec_storm_class(
        "com.alibaba.jstorm.daemon.nimbus.NimbusServer",
        jvmtype="-server",
        extrajars=(cppaths+[nimbus_classpath]),
        childopts=childopts)
def supervisor():
    """Syntax: [jstorm supervisor]
    Launches the supervisor daemon. This command should be run
    under supervision with a tool like daemontools or monit.
    See Setting up a JStorm cluster for more information.
    (https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
    """
    cppaths = [JSTORM_CONF_DIR]
    # JVM flags come from supervisor.childopts plus daemon logging options.
    childopts = confvalue("supervisor.childopts", cppaths) + get_server_childopts("supervisor.log")
    exec_storm_class(
        "com.alibaba.jstorm.daemon.supervisor.Supervisor",
        jvmtype="-server",
        extrajars=cppaths,
        childopts=childopts)
def drpc():
    """Syntax: [jstorm drpc]
    Launches a DRPC daemon. This command should be run under supervision
    with a tool like daemontools or monit.
    See Distributed RPC for more information.
    (https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation)
    """
    cppaths = [JSTORM_CONF_DIR]
    # JVM flags come from drpc.childopts plus daemon logging options.
    childopts = confvalue("drpc.childopts", cppaths) + get_server_childopts("drpc.log")
    exec_storm_class(
        "com.alibaba.jstorm.drpc.Drpc",
        jvmtype="-server",
        extrajars=cppaths,
        childopts=childopts)
def print_classpath():
    """Syntax: [jstorm classpath]
    Prints the classpath used by the jstorm client when running commands.
    """
    # No extra jars: just the release jars, lib/ and --include-jars.
    print get_classpath([])
def print_commands():
    """Print all client commands and link to documentation"""
    # Usage banner shown for `jstorm help` and unknown commands.
    print "jstorm command [--config client_storm.yaml] [--exclude-jars exclude1.jar,exclude2.jar] [-c key1=value1,key2=value2][command parameter]"
    print "Commands:\n\t", "\n\t".join(sorted(COMMANDS.keys()))
    print "\n\t[--config client_storm.yaml]\t\t\t optional, setting client's storm.yaml"
    print "\n\t[--exclude-jars exclude1.jar,exclude2.jar]\t optional, exclude jars, avoid jar conflict"
    print "\n\t[-c key1=value1,key2=value2]\t\t\t optional, add key=value pair to configuration"
    print "\nHelp:", "\n\thelp", "\n\thelp <command>"
    print "\nDocumentation for the jstorm client can be found at https://github.com/alibaba/jstorm/wiki/JStorm-Chinese-Documentation\n"
def print_usage(command=None):
    """Print one help message or list of available commands"""
    if command != None:
        # Per-command help is the handler's docstring.
        if COMMANDS.has_key(command):  # dict.has_key: Python 2 only
            print (COMMANDS[command].__doc__ or
                   "No documentation provided for <%s>" % command)
        else:
            print "<%s> is not a valid command" % command
    else:
        print_commands()
def unknown_command(*args):
    # Report an unrecognised command line and show the usage summary.
    print "Unknown command: [jstorm %s]" % ' '.join(sys.argv[1:])
    print_usage()
def metrics_Monitor(*args):
    """Syntax: [jstorm metricsMonitor topologyname bool]
    Enable or disable the metrics monitor of one topology.
    """
    # Registered in COMMANDS under the key "metricsMonitor".
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.metrics_monitor",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
def list(*args):
    """Syntax: [jstorm list]
    List cluster information
    """
    # NOTE(review): this function shadows the builtin `list`; renaming it
    # would also require changing the COMMANDS table, so it is only flagged.
    childopts = get_client_childopts()
    exec_storm_class(
        "backtype.storm.command.list",
        args=args,
        jvmtype="-client -Xms256m -Xmx256m",
        extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
        childopts=childopts)
# Dispatch table mapping the first CLI token to its handler function.
COMMANDS = {"jar": jar, "kill": kill, "nimbus": nimbus, "zktool": zktool,
            "drpc": drpc, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
            "remoteconfvalue": print_remoteconfvalue, "classpath": print_classpath,
            "activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
            "metricsMonitor": metrics_Monitor, "list": list, "restart": restart, "update_config": update_config}
def parse_config(config_list):
    """Append every parsed -c key=value pair to the global CONFIG_OPTS."""
    global CONFIG_OPTS
    if config_list:
        CONFIG_OPTS.extend(config_list)
def parse_exclude_jars(jars):
    # Record the comma separated --exclude-jars value in the global list
    # consulted by get_jars_full.
    global EXCLUDE_JARS
    EXCLUDE_JARS = jars.split(",")
    print " Excludes jars:"
    print EXCLUDE_JARS
def parse_include_jars(jars):
    # Record the comma separated --include-jars value in the global list
    # appended to the classpath by get_classpath.
    global INCLUDE_JARS
    INCLUDE_JARS = jars.split(",")
    print " Include jars:"
    print INCLUDE_JARS
def parse_config_opts(args):
    """Split the raw command line into (config_list, remaining_args).

    Consumes the global options and their values: -c collects key=value
    pairs, --config sets CLIENT_CONF_FILE, --exclude-jars / --include-jars
    feed their respective parsers. Everything else is passed through in
    order. `args` itself is not modified.
    """
    config_list = []
    args_list = []
    i = 0
    while i < len(args):
        token = args[i]
        i += 1
        if token == "-c":
            config_list.append(args[i])
            i += 1
        elif token == "--config":
            global CLIENT_CONF_FILE
            CLIENT_CONF_FILE = args[i]
            i += 1
        elif token == "--exclude-jars":
            parse_exclude_jars(args[i])
            i += 1
        elif token == "--include-jars":
            parse_include_jars(args[i])
            i += 1
        else:
            args_list.append(token)
    return config_list, args_list
def main():
    # Entry point: parse global options, then dispatch to the requested
    # command handler; exits with the handler's recorded STATUS.
    if len(sys.argv) <= 1:
        print_usage()
        sys.exit(-1)
    global CONFIG_OPTS
    config_list, args = parse_config_opts(sys.argv[1:])
    parse_config(config_list)
    COMMAND = args[0]
    ARGS = args[1:]
    if COMMANDS.get(COMMAND) == None:
        unknown_command(COMMAND)
        sys.exit(-1)
    # `jstorm <command> help` prints that command's docstring.
    if len(ARGS) != 0 and ARGS[0] == "help":
        print_usage(COMMAND)
        sys.exit(0)
    try:
        (COMMANDS.get(COMMAND, "help"))(*ARGS)
    except Exception, msg:  # Python 2 except syntax; catches handler errors
        print(msg)
        print_usage(COMMAND)
        sys.exit(-1)
    # STATUS was set by exec_storm_class (0 if the command ran no JVM).
    sys.exit(STATUS)
if __name__ == "__main__":
    # Fail fast if java is missing, then dispatch the requested command.
    check_java()
    main()
| apache-2.0 |
daniponi/django | django/contrib/gis/management/commands/ogrinspect.py | 48 | 6020 | import argparse
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
from django.utils.inspect import get_func_args
class LayerOptionAction(argparse.Action):
    """
    Custom argparse action for the `ogrinspect` `layer_key` keyword option:
    the value is stored as an int when it parses as one, otherwise as the
    raw string layer identifier.
    """
    def __call__(self, parser, namespace, value, option_string=None):
        try:
            parsed = int(value)
        except ValueError:
            parsed = value
        setattr(namespace, self.dest, parsed)
class ListOptionAction(argparse.Action):
    """
    Custom argparse action for `ogrinspect` keywords that take a comma
    separated string list; the literal string 'True'/'true' stores the
    boolean True instead.
    """
    def __call__(self, parser, namespace, value, option_string=None):
        parsed = True if value.lower() == 'true' else value.split(',')
        setattr(namespace, self.dest, parsed)
class Command(BaseCommand):
    help = (
        'Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
        'a GeoDjango model with the given model name. For example:\n'
        ' ./manage.py ogrinspect zipcode.shp Zipcode'
    )

    requires_system_checks = False

    def add_arguments(self, parser):
        """Register the positional arguments and ogrinspect keyword options."""
        parser.add_argument('data_source', help='Path to the data source.')
        parser.add_argument('model_name', help='Name of the model to create.')
        parser.add_argument(
            '--blank', dest='blank',
            action=ListOptionAction, default=False,
            help='Use a comma separated list of OGR field names to add '
                 'the `blank=True` option to the field definition. Set to `true` '
                 'to apply to all applicable fields.',
        )
        parser.add_argument(
            '--decimal', dest='decimal',
            action=ListOptionAction, default=False,
            help='Use a comma separated list of OGR float fields to '
                 'generate `DecimalField` instead of the default '
                 '`FloatField`. Set to `true` to apply to all OGR float fields.',
        )
        parser.add_argument(
            '--geom-name', dest='geom_name', default='geom',
            help='Specifies the model name for the Geometry Field (defaults to `geom`)'
        )
        parser.add_argument(
            '--layer', dest='layer_key',
            action=LayerOptionAction, default=0,
            help='The key for specifying which layer in the OGR data '
                 'source to use. Defaults to 0 (the first layer). May be '
                 'an integer or a string identifier for the layer.',
        )
        parser.add_argument(
            '--multi-geom', action='store_true',
            dest='multi_geom', default=False,
            help='Treat the geometry in the data source as a geometry collection.',
        )
        parser.add_argument(
            '--name-field', dest='name_field',
            help='Specifies a field name to return for the `__unicode__`/`__str__` function.',
        )
        parser.add_argument(
            '--no-imports', action='store_false', dest='imports', default=True,
            help='Do not include `from django.contrib.gis.db import models` statement.',
        )
        parser.add_argument(
            '--null', dest='null', action=ListOptionAction, default=False,
            help='Use a comma separated list of OGR field names to add '
                 'the `null=True` option to the field definition. Set to `true` '
                 'to apply to all applicable fields.',
        )
        parser.add_argument(
            '--srid', dest='srid',
            help='The SRID to use for the Geometry Field. If it can be '
                 'determined, the SRID of the data source is used.',
        )
        parser.add_argument(
            '--mapping', action='store_true', dest='mapping',
            help='Generate mapping dictionary for use with `LayerMapping`.',
        )

    def handle(self, *args, **options):
        """Inspect the data source and return the generated model source code."""
        data_source, model_name = options.pop('data_source'), options.pop('model_name')

        if not gdal.HAS_GDAL:
            raise CommandError('GDAL is required to inspect geospatial data sources.')

        # Getting the OGR DataSource from the string parameter.
        try:
            ds = gdal.DataSource(data_source)
        except gdal.GDALException as msg:
            raise CommandError(msg)

        # Returning the output of ogrinspect with the given arguments
        # and options.
        from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
        # Filter options to params accepted by `_ogrinspect`
        ogr_options = {k: v for k, v in options.items()
                       if k in get_func_args(_ogrinspect) and v is not None}
        # Fix: materialize the generator directly instead of the redundant
        # `[s for s in ...]` copy.
        output = list(_ogrinspect(ds, model_name, **ogr_options))

        if options['mapping']:
            # Constructing the keyword arguments for `mapping`, and
            # calling it on the data source.
            kwargs = {
                'geom_name': options['geom_name'],
                'layer_key': options['layer_key'],
                'multi_geom': options['multi_geom'],
            }
            mapping_dict = mapping(ds, **kwargs)
            # This extra legwork is so that the dictionary definition comes
            # out in the same order as the fields in the model definition.
            rev_mapping = {v: k for k, v in mapping_dict.items()}
            output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
                           '%s_mapping = {' % model_name.lower()])
            output.extend("    '%s' : '%s'," % (
                rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
            )
            output.extend(["    '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
        return '\n'.join(output) + '\n'
| bsd-3-clause |
termie/nova-migration-demo | nova/api/ec2/metadatarequesthandler.py | 2 | 2711 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Metadata request handler."""
import webob.dec
import webob.exc
from nova import log as logging
from nova import flags
from nova import wsgi
from nova.api.ec2 import cloud
LOG = logging.getLogger('nova.api.ec2.metadata')
FLAGS = flags.FLAGS
class MetadataRequestHandler(wsgi.Application):
    """Serve EC2-style instance metadata over HTTP.

    Requests are resolved against the metadata dict that nova's
    CloudController returns for the caller's IP address.
    """

    def print_data(self, data):
        """Render a metadata node the way the EC2 metadata service does.

        Dicts render one key per line (a '/' suffix marks a sub-dict,
        'key=name' when the sub-dict carries a '_name' entry); lists render
        one item per line; anything else is stringified.
        """
        if isinstance(data, dict):
            # Fix: build lines in a list and join once instead of repeated
            # string concatenation followed by stripping the last '\n'.
            lines = []
            for key, value in data.items():
                if key == '_name':
                    continue
                entry = key
                if isinstance(value, dict):
                    if '_name' in value:
                        entry += '=' + str(value['_name'])
                    else:
                        entry += '/'
                lines.append(entry)
            return '\n'.join(lines)
        elif isinstance(data, list):
            return '\n'.join(data)
        else:
            return str(data)

    def lookup(self, path, data):
        """Walk `data` down the '/'-separated `path`.

        Returns the node found, the current node if a non-dict is reached
        before the path is exhausted, or None when a component is missing.
        """
        for item in path.split('/'):
            if item:
                if not isinstance(data, dict):
                    return data
                # Fix: `item not in data` instead of `not item in data`.
                if item not in data:
                    return None
                data = data[item]
        return data

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """WSGI entry point: resolve req.path_info against the caller's metadata."""
        cc = cloud.CloudController()
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            # Behind a proxy, trust the forwarded client address if present.
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        meta_data = cc.get_metadata(remote_address)
        if meta_data is None:
            LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
            raise webob.exc.HTTPNotFound()
        data = self.lookup(req.path_info, meta_data)
        if data is None:
            raise webob.exc.HTTPNotFound()
        return self.print_data(data)
| apache-2.0 |
paulsmith/geodjango | tests/regressiontests/forms/localflavor/fi.py | 20 | 21360 | # -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ FI form fields.
tests = r"""
# FIZipCodeField #############################################################
FIZipCodeField validates that the data is a valid FI zipcode.
>>> from django.contrib.localflavor.fi.forms import FIZipCodeField
>>> f = FIZipCodeField()
>>> f.clean('20540')
u'20540'
>>> f.clean('20101')
u'20101'
>>> f.clean('20s40')
Traceback (most recent call last):
...
ValidationError: [u'Enter a zip code in the format XXXXX.']
>>> f.clean('205401')
Traceback (most recent call last):
...
ValidationError: [u'Enter a zip code in the format XXXXX.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f = FIZipCodeField(required=False)
>>> f.clean('20540')
u'20540'
>>> f.clean('20101')
u'20101'
>>> f.clean('20s40')
Traceback (most recent call last):
...
ValidationError: [u'Enter a zip code in the format XXXXX.']
>>> f.clean('205401')
Traceback (most recent call last):
...
ValidationError: [u'Enter a zip code in the format XXXXX.']
>>> f.clean(None)
u''
>>> f.clean('')
u''
# FIMunicipalitySelect ###############################################################
A Select widget that uses a list of Finnish municipalities as its choices.
>>> from django.contrib.localflavor.fi.forms import FIMunicipalitySelect
>>> w = FIMunicipalitySelect()
>>> unicode(w.render('municipalities', 'turku'))
u'<select name="municipalities">\n<option value="akaa">Akaa</option>\n<option value="alaharma">Alah\xe4rm\xe4</option>\n<option value="alajarvi">Alaj\xe4rvi</option>\n<option value="alastaro">Alastaro</option>\n<option value="alavieska">Alavieska</option>\n<option value="alavus">Alavus</option>\n<option value="anjalankoski">Anjalankoski</option>\n<option value="artjarvi">Artj\xe4rvi</option>\n<option value="asikkala">Asikkala</option>\n<option value="askainen">Askainen</option>\n<option value="askola">Askola</option>\n<option value="aura">Aura</option>\n<option value="brando">Br\xe4nd\xf6</option>\n<option value="dragsfjard">Dragsfj\xe4rd</option>\n<option value="eckero">Ecker\xf6</option>\n<option value="elimaki">Elim\xe4ki</option>\n<option value="eno">Eno</option>\n<option value="enonkoski">Enonkoski</option>\n<option value="enontekio">Enonteki\xf6</option>\n<option value="espoo">Espoo</option>\n<option value="eura">Eura</option>\n<option value="eurajoki">Eurajoki</option>\n<option value="evijarvi">Evij\xe4rvi</option>\n<option value="finstrom">Finstr\xf6m</option>\n<option value="forssa">Forssa</option>\n<option value="foglo">F\xf6gl\xf6</option>\n<option value="geta">Geta</option>\n<option value="haapajarvi">Haapaj\xe4rvi</option>\n<option value="haapavesi">Haapavesi</option>\n<option value="hailuoto">Hailuoto</option>\n<option value="halikko">Halikko</option>\n<option value="halsua">Halsua</option>\n<option value="hamina">Hamina</option>\n<option value="hammarland">Hammarland</option>\n<option value="hankasalmi">Hankasalmi</option>\n<option value="hanko">Hanko</option>\n<option value="harjavalta">Harjavalta</option>\n<option value="hartola">Hartola</option>\n<option value="hattula">Hattula</option>\n<option value="hauho">Hauho</option>\n<option value="haukipudas">Haukipudas</option>\n<option value="hausjarvi">Hausj\xe4rvi</option>\n<option value="heinola">Heinola</option>\n<option value="heinavesi">Hein\xe4vesi</option>\n<option 
value="helsinki">Helsinki</option>\n<option value="himanka">Himanka</option>\n<option value="hirvensalmi">Hirvensalmi</option>\n<option value="hollola">Hollola</option>\n<option value="honkajoki">Honkajoki</option>\n<option value="houtskari">Houtskari</option>\n<option value="huittinen">Huittinen</option>\n<option value="humppila">Humppila</option>\n<option value="hyrynsalmi">Hyrynsalmi</option>\n<option value="hyvinkaa">Hyvink\xe4\xe4</option>\n<option value="hameenkoski">H\xe4meenkoski</option>\n<option value="hameenkyro">H\xe4meenkyr\xf6</option>\n<option value="hameenlinna">H\xe4meenlinna</option>\n<option value="ii">Ii</option>\n<option value="iisalmi">Iisalmi</option>\n<option value="iitti">Iitti</option>\n<option value="ikaalinen">Ikaalinen</option>\n<option value="ilmajoki">Ilmajoki</option>\n<option value="ilomantsi">Ilomantsi</option>\n<option value="imatra">Imatra</option>\n<option value="inari">Inari</option>\n<option value="inio">Ini\xf6</option>\n<option value="inkoo">Inkoo</option>\n<option value="isojoki">Isojoki</option>\n<option value="isokyro">Isokyr\xf6</option>\n<option value="jaala">Jaala</option>\n<option value="jalasjarvi">Jalasj\xe4rvi</option>\n<option value="janakkala">Janakkala</option>\n<option value="joensuu">Joensuu</option>\n<option value="jokioinen">Jokioinen</option>\n<option value="jomala">Jomala</option>\n<option value="joroinen">Joroinen</option>\n<option value="joutsa">Joutsa</option>\n<option value="joutseno">Joutseno</option>\n<option value="juankoski">Juankoski</option>\n<option value="jurva">Jurva</option>\n<option value="juuka">Juuka</option>\n<option value="juupajoki">Juupajoki</option>\n<option value="juva">Juva</option>\n<option value="jyvaskyla">Jyv\xe4skyl\xe4</option>\n<option value="jyvaskylan_mlk">Jyv\xe4skyl\xe4n maalaiskunta</option>\n<option value="jamijarvi">J\xe4mij\xe4rvi</option>\n<option value="jamsa">J\xe4ms\xe4</option>\n<option value="jamsankoski">J\xe4ms\xe4nkoski</option>\n<option 
value="jarvenpaa">J\xe4rvenp\xe4\xe4</option>\n<option value="kaarina">Kaarina</option>\n<option value="kaavi">Kaavi</option>\n<option value="kajaani">Kajaani</option>\n<option value="kalajoki">Kalajoki</option>\n<option value="kalvola">Kalvola</option>\n<option value="kangasala">Kangasala</option>\n<option value="kangasniemi">Kangasniemi</option>\n<option value="kankaanpaa">Kankaanp\xe4\xe4</option>\n<option value="kannonkoski">Kannonkoski</option>\n<option value="kannus">Kannus</option>\n<option value="karijoki">Karijoki</option>\n<option value="karjaa">Karjaa</option>\n<option value="karjalohja">Karjalohja</option>\n<option value="karkkila">Karkkila</option>\n<option value="karstula">Karstula</option>\n<option value="karttula">Karttula</option>\n<option value="karvia">Karvia</option>\n<option value="kaskinen">Kaskinen</option>\n<option value="kauhajoki">Kauhajoki</option>\n<option value="kauhava">Kauhava</option>\n<option value="kauniainen">Kauniainen</option>\n<option value="kaustinen">Kaustinen</option>\n<option value="keitele">Keitele</option>\n<option value="kemi">Kemi</option>\n<option value="kemijarvi">Kemij\xe4rvi</option>\n<option value="keminmaa">Keminmaa</option>\n<option value="kemio">Kemi\xf6</option>\n<option value="kempele">Kempele</option>\n<option value="kerava">Kerava</option>\n<option value="kerimaki">Kerim\xe4ki</option>\n<option value="kestila">Kestil\xe4</option>\n<option value="kesalahti">Kes\xe4lahti</option>\n<option value="keuruu">Keuruu</option>\n<option value="kihnio">Kihni\xf6</option>\n<option value="kiikala">Kiikala</option>\n<option value="kiikoinen">Kiikoinen</option>\n<option value="kiiminki">Kiiminki</option>\n<option value="kinnula">Kinnula</option>\n<option value="kirkkonummi">Kirkkonummi</option>\n<option value="kisko">Kisko</option>\n<option value="kitee">Kitee</option>\n<option value="kittila">Kittil\xe4</option>\n<option value="kiukainen">Kiukainen</option>\n<option value="kiuruvesi">Kiuruvesi</option>\n<option 
value="kivijarvi">Kivij\xe4rvi</option>\n<option value="kokemaki">Kokem\xe4ki</option>\n<option value="kokkola">Kokkola</option>\n<option value="kolari">Kolari</option>\n<option value="konnevesi">Konnevesi</option>\n<option value="kontiolahti">Kontiolahti</option>\n<option value="korpilahti">Korpilahti</option>\n<option value="korppoo">Korppoo</option>\n<option value="korsnas">Korsn\xe4s</option>\n<option value="kortesjarvi">Kortesj\xe4rvi</option>\n<option value="koskitl">KoskiTl</option>\n<option value="kotka">Kotka</option>\n<option value="kouvola">Kouvola</option>\n<option value="kristiinankaupunki">Kristiinankaupunki</option>\n<option value="kruunupyy">Kruunupyy</option>\n<option value="kuhmalahti">Kuhmalahti</option>\n<option value="kuhmo">Kuhmo</option>\n<option value="kuhmoinen">Kuhmoinen</option>\n<option value="kumlinge">Kumlinge</option>\n<option value="kuopio">Kuopio</option>\n<option value="kuortane">Kuortane</option>\n<option value="kurikka">Kurikka</option>\n<option value="kuru">Kuru</option>\n<option value="kustavi">Kustavi</option>\n<option value="kuusamo">Kuusamo</option>\n<option value="kuusankoski">Kuusankoski</option>\n<option value="kuusjoki">Kuusjoki</option>\n<option value="kylmakoski">Kylm\xe4koski</option>\n<option value="kyyjarvi">Kyyj\xe4rvi</option>\n<option value="kalvia">K\xe4lvi\xe4</option>\n<option value="karkola">K\xe4rk\xf6l\xe4</option>\n<option value="karsamaki">K\xe4rs\xe4m\xe4ki</option>\n<option value="kokar">K\xf6kar</option>\n<option value="koylio">K\xf6yli\xf6</option>\n<option value="lahti">Lahti</option>\n<option value="laihia">Laihia</option>\n<option value="laitila">Laitila</option>\n<option value="lammi">Lammi</option>\n<option value="lapinjarvi">Lapinj\xe4rvi</option>\n<option value="lapinlahti">Lapinlahti</option>\n<option value="lappajarvi">Lappaj\xe4rvi</option>\n<option value="lappeenranta">Lappeenranta</option>\n<option value="lappi">Lappi</option>\n<option value="lapua">Lapua</option>\n<option 
value="laukaa">Laukaa</option>\n<option value="lavia">Lavia</option>\n<option value="lehtimaki">Lehtim\xe4ki</option>\n<option value="leivonmaki">Leivonm\xe4ki</option>\n<option value="lemi">Lemi</option>\n<option value="lemland">Lemland</option>\n<option value="lempaala">Lemp\xe4\xe4l\xe4</option>\n<option value="lemu">Lemu</option>\n<option value="leppavirta">Lepp\xe4virta</option>\n<option value="lestijarvi">Lestij\xe4rvi</option>\n<option value="lieksa">Lieksa</option>\n<option value="lieto">Lieto</option>\n<option value="liljendal">Liljendal</option>\n<option value="liminka">Liminka</option>\n<option value="liperi">Liperi</option>\n<option value="lohja">Lohja</option>\n<option value="lohtaja">Lohtaja</option>\n<option value="loimaa">Loimaa</option>\n<option value="loppi">Loppi</option>\n<option value="loviisa">Loviisa</option>\n<option value="luhanka">Luhanka</option>\n<option value="lumijoki">Lumijoki</option>\n<option value="lumparland">Lumparland</option>\n<option value="luoto">Luoto</option>\n<option value="luumaki">Luum\xe4ki</option>\n<option value="luvia">Luvia</option>\n<option value="maalahti">Maalahti</option>\n<option value="maaninka">Maaninka</option>\n<option value="maarianhamina">Maarianhamina</option>\n<option value="marttila">Marttila</option>\n<option value="masku">Masku</option>\n<option value="mellila">Mellil\xe4</option>\n<option value="merijarvi">Merij\xe4rvi</option>\n<option value="merikarvia">Merikarvia</option>\n<option value="merimasku">Merimasku</option>\n<option value="miehikkala">Miehikk\xe4l\xe4</option>\n<option value="mikkeli">Mikkeli</option>\n<option value="mouhijarvi">Mouhij\xe4rvi</option>\n<option value="muhos">Muhos</option>\n<option value="multia">Multia</option>\n<option value="muonio">Muonio</option>\n<option value="mustasaari">Mustasaari</option>\n<option value="muurame">Muurame</option>\n<option value="muurla">Muurla</option>\n<option value="mynamaki">Myn\xe4m\xe4ki</option>\n<option 
value="myrskyla">Myrskyl\xe4</option>\n<option value="mantsala">M\xe4nts\xe4l\xe4</option>\n<option value="mantta">M\xe4ntt\xe4</option>\n<option value="mantyharju">M\xe4ntyharju</option>\n<option value="naantali">Naantali</option>\n<option value="nakkila">Nakkila</option>\n<option value="nastola">Nastola</option>\n<option value="nauvo">Nauvo</option>\n<option value="nilsia">Nilsi\xe4</option>\n<option value="nivala">Nivala</option>\n<option value="nokia">Nokia</option>\n<option value="noormarkku">Noormarkku</option>\n<option value="nousiainen">Nousiainen</option>\n<option value="nummi-pusula">Nummi-Pusula</option>\n<option value="nurmes">Nurmes</option>\n<option value="nurmijarvi">Nurmij\xe4rvi</option>\n<option value="nurmo">Nurmo</option>\n<option value="narpio">N\xe4rpi\xf6</option>\n<option value="oravainen">Oravainen</option>\n<option value="orimattila">Orimattila</option>\n<option value="oripaa">Orip\xe4\xe4</option>\n<option value="orivesi">Orivesi</option>\n<option value="oulainen">Oulainen</option>\n<option value="oulu">Oulu</option>\n<option value="oulunsalo">Oulunsalo</option>\n<option value="outokumpu">Outokumpu</option>\n<option value="padasjoki">Padasjoki</option>\n<option value="paimio">Paimio</option>\n<option value="paltamo">Paltamo</option>\n<option value="parainen">Parainen</option>\n<option value="parikkala">Parikkala</option>\n<option value="parkano">Parkano</option>\n<option value="pedersore">Peders\xf6re</option>\n<option value="pelkosenniemi">Pelkosenniemi</option>\n<option value="pello">Pello</option>\n<option value="perho">Perho</option>\n<option value="pernaja">Pernaja</option>\n<option value="pernio">Perni\xf6</option>\n<option value="pertteli">Pertteli</option>\n<option value="pertunmaa">Pertunmaa</option>\n<option value="petajavesi">Pet\xe4j\xe4vesi</option>\n<option value="pieksamaki">Pieks\xe4m\xe4ki</option>\n<option value="pielavesi">Pielavesi</option>\n<option value="pietarsaari">Pietarsaari</option>\n<option 
value="pihtipudas">Pihtipudas</option>\n<option value="piikkio">Piikki\xf6</option>\n<option value="piippola">Piippola</option>\n<option value="pirkkala">Pirkkala</option>\n<option value="pohja">Pohja</option>\n<option value="polvijarvi">Polvij\xe4rvi</option>\n<option value="pomarkku">Pomarkku</option>\n<option value="pori">Pori</option>\n<option value="pornainen">Pornainen</option>\n<option value="porvoo">Porvoo</option>\n<option value="posio">Posio</option>\n<option value="pudasjarvi">Pudasj\xe4rvi</option>\n<option value="pukkila">Pukkila</option>\n<option value="pulkkila">Pulkkila</option>\n<option value="punkaharju">Punkaharju</option>\n<option value="punkalaidun">Punkalaidun</option>\n<option value="puolanka">Puolanka</option>\n<option value="puumala">Puumala</option>\n<option value="pyhtaa">Pyht\xe4\xe4</option>\n<option value="pyhajoki">Pyh\xe4joki</option>\n<option value="pyhajarvi">Pyh\xe4j\xe4rvi</option>\n<option value="pyhanta">Pyh\xe4nt\xe4</option>\n<option value="pyharanta">Pyh\xe4ranta</option>\n<option value="pyhaselka">Pyh\xe4selk\xe4</option>\n<option value="pylkonmaki">Pylk\xf6nm\xe4ki</option>\n<option value="palkane">P\xe4lk\xe4ne</option>\n<option value="poytya">P\xf6yty\xe4</option>\n<option value="raahe">Raahe</option>\n<option value="raisio">Raisio</option>\n<option value="rantasalmi">Rantasalmi</option>\n<option value="rantsila">Rantsila</option>\n<option value="ranua">Ranua</option>\n<option value="rauma">Rauma</option>\n<option value="rautalampi">Rautalampi</option>\n<option value="rautavaara">Rautavaara</option>\n<option value="rautjarvi">Rautj\xe4rvi</option>\n<option value="reisjarvi">Reisj\xe4rvi</option>\n<option value="renko">Renko</option>\n<option value="riihimaki">Riihim\xe4ki</option>\n<option value="ristiina">Ristiina</option>\n<option value="ristijarvi">Ristij\xe4rvi</option>\n<option value="rovaniemi">Rovaniemi</option>\n<option value="ruokolahti">Ruokolahti</option>\n<option 
value="ruotsinpyhtaa">Ruotsinpyht\xe4\xe4</option>\n<option value="ruovesi">Ruovesi</option>\n<option value="rusko">Rusko</option>\n<option value="rymattyla">Rym\xe4ttyl\xe4</option>\n<option value="raakkyla">R\xe4\xe4kkyl\xe4</option>\n<option value="saarijarvi">Saarij\xe4rvi</option>\n<option value="salla">Salla</option>\n<option value="salo">Salo</option>\n<option value="saltvik">Saltvik</option>\n<option value="sammatti">Sammatti</option>\n<option value="sauvo">Sauvo</option>\n<option value="savitaipale">Savitaipale</option>\n<option value="savonlinna">Savonlinna</option>\n<option value="savonranta">Savonranta</option>\n<option value="savukoski">Savukoski</option>\n<option value="seinajoki">Sein\xe4joki</option>\n<option value="sievi">Sievi</option>\n<option value="siikainen">Siikainen</option>\n<option value="siikajoki">Siikajoki</option>\n<option value="siilinjarvi">Siilinj\xe4rvi</option>\n<option value="simo">Simo</option>\n<option value="sipoo">Sipoo</option>\n<option value="siuntio">Siuntio</option>\n<option value="sodankyla">Sodankyl\xe4</option>\n<option value="soini">Soini</option>\n<option value="somero">Somero</option>\n<option value="sonkajarvi">Sonkaj\xe4rvi</option>\n<option value="sotkamo">Sotkamo</option>\n<option value="sottunga">Sottunga</option>\n<option value="sulkava">Sulkava</option>\n<option value="sund">Sund</option>\n<option value="suomenniemi">Suomenniemi</option>\n<option value="suomusjarvi">Suomusj\xe4rvi</option>\n<option value="suomussalmi">Suomussalmi</option>\n<option value="suonenjoki">Suonenjoki</option>\n<option value="sysma">Sysm\xe4</option>\n<option value="sakyla">S\xe4kyl\xe4</option>\n<option value="sarkisalo">S\xe4rkisalo</option>\n<option value="taipalsaari">Taipalsaari</option>\n<option value="taivalkoski">Taivalkoski</option>\n<option value="taivassalo">Taivassalo</option>\n<option value="tammela">Tammela</option>\n<option value="tammisaari">Tammisaari</option>\n<option value="tampere">Tampere</option>\n<option 
value="tarvasjoki">Tarvasjoki</option>\n<option value="tervo">Tervo</option>\n<option value="tervola">Tervola</option>\n<option value="teuva">Teuva</option>\n<option value="tohmajarvi">Tohmaj\xe4rvi</option>\n<option value="toholampi">Toholampi</option>\n<option value="toivakka">Toivakka</option>\n<option value="tornio">Tornio</option>\n<option value="turku" selected="selected">Turku</option>\n<option value="tuulos">Tuulos</option>\n<option value="tuusniemi">Tuusniemi</option>\n<option value="tuusula">Tuusula</option>\n<option value="tyrnava">Tyrn\xe4v\xe4</option>\n<option value="toysa">T\xf6ys\xe4</option>\n<option value="ullava">Ullava</option>\n<option value="ulvila">Ulvila</option>\n<option value="urjala">Urjala</option>\n<option value="utajarvi">Utaj\xe4rvi</option>\n<option value="utsjoki">Utsjoki</option>\n<option value="uurainen">Uurainen</option>\n<option value="uusikaarlepyy">Uusikaarlepyy</option>\n<option value="uusikaupunki">Uusikaupunki</option>\n<option value="vaala">Vaala</option>\n<option value="vaasa">Vaasa</option>\n<option value="vahto">Vahto</option>\n<option value="valkeakoski">Valkeakoski</option>\n<option value="valkeala">Valkeala</option>\n<option value="valtimo">Valtimo</option>\n<option value="vammala">Vammala</option>\n<option value="vampula">Vampula</option>\n<option value="vantaa">Vantaa</option>\n<option value="varkaus">Varkaus</option>\n<option value="varpaisjarvi">Varpaisj\xe4rvi</option>\n<option value="vehmaa">Vehmaa</option>\n<option value="velkua">Velkua</option>\n<option value="vesanto">Vesanto</option>\n<option value="vesilahti">Vesilahti</option>\n<option value="veteli">Veteli</option>\n<option value="vierema">Vierem\xe4</option>\n<option value="vihanti">Vihanti</option>\n<option value="vihti">Vihti</option>\n<option value="viitasaari">Viitasaari</option>\n<option value="vilppula">Vilppula</option>\n<option value="vimpeli">Vimpeli</option>\n<option value="virolahti">Virolahti</option>\n<option 
value="virrat">Virrat</option>\n<option value="vardo">V\xe5rd\xf6</option>\n<option value="vahakyro">V\xe4h\xe4kyr\xf6</option>\n<option value="vastanfjard">V\xe4stanfj\xe4rd</option>\n<option value="voyri-maksamaa">V\xf6yri-Maksamaa</option>\n<option value="yliharma">Ylih\xe4rm\xe4</option>\n<option value="yli-ii">Yli-Ii</option>\n<option value="ylikiiminki">Ylikiiminki</option>\n<option value="ylistaro">Ylistaro</option>\n<option value="ylitornio">Ylitornio</option>\n<option value="ylivieska">Ylivieska</option>\n<option value="ylamaa">Yl\xe4maa</option>\n<option value="ylane">Yl\xe4ne</option>\n<option value="ylojarvi">Yl\xf6j\xe4rvi</option>\n<option value="ypaja">Yp\xe4j\xe4</option>\n<option value="aetsa">\xc4ets\xe4</option>\n<option value="ahtari">\xc4ht\xe4ri</option>\n<option value="aanekoski">\xc4\xe4nekoski</option>\n</select>'
# FISocialSecurityNumber ##############################################################
>>> from django.contrib.localflavor.fi.forms import FISocialSecurityNumber
>>> f = FISocialSecurityNumber()
>>> f.clean('010101-0101')
u'010101-0101'
>>> f.clean('010101+0101')
u'010101+0101'
>>> f.clean('010101A0101')
u'010101A0101'
>>> f.clean('101010-0102')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid Finnish social security number.']
>>> f.clean('10a010-0101')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid Finnish social security number.']
>>> f.clean('101010-0\xe401')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid Finnish social security number.']
>>> f.clean('101010b0101')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid Finnish social security number.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f = FISocialSecurityNumber(required=False)
>>> f.clean('010101-0101')
u'010101-0101'
>>> f.clean(None)
u''
>>> f.clean('')
u''
"""
| bsd-3-clause |
roadmapper/ansible | lib/ansible/modules/cloud/kubevirt/kubevirt_rs.py | 21 | 6703 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_rs
short_description: Manage KubeVirt virtual machine replica sets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine replica sets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine replica set.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine replica set exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine."
required: true
type: dict
replicas:
description:
- Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
- Replicas defaults to 1 if newly created replica set.
type: int
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine replica set 'myvmir'
kubevirt_rs:
state: present
name: myvmir
namespace: vms
wait: true
replicas: 3
memory: 64M
labels:
myvmi: myvmi
selector:
matchLabels:
myvmi: myvmi
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Remove virtual machine replica set 'myvmir'
kubevirt_rs:
state: absent
name: myvmir
namespace: vms
wait: true
'''
RETURN = '''
kubevirt_rs:
description:
- The virtual machine virtual machine replica set managed by the user.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
)
# Kubernetes resource kind managed by this module.
KIND = 'VirtualMachineInstanceReplicaSet'

# Argument-spec entries specific to replica sets; merged into the common
# VM/auth argument spec in KubeVirtVMIRS.argspec.
VMIR_ARG_SPEC = {
    'replicas': {'type': 'int'},
    'selector': {'type': 'dict'},
}
class KubeVirtVMIRS(KubeVirtRawModule):
    """Manage the lifecycle of a KubeVirt VirtualMachineInstanceReplicaSet."""

    @property
    def argspec(self):
        """Build the module argument spec from auth, common VM and replica-set options."""
        argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
        argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
        argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
        return argument_spec

    def wait_for_replicas(self, replicas):
        """Watch the replica set until ``status.readyReplicas`` equals ``replicas``.

        Returns the watched object as a dict on success; calls fail_json()
        (which exits the module) when the watch times out or the ready-replica
        count cannot be determined.
        """
        resource = self.find_supported_resource(KIND)
        return_obj = None
        for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
            entity = event['object']
            if entity.metadata.name != self.name:
                continue
            status = entity.get('status', {})
            ready_replicas = status.get('readyReplicas', 0)
            if ready_replicas == replicas:
                return_obj = entity
                break

        if not return_obj:
            self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
        if replicas and return_obj.status.readyReplicas is None:
            self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
        if replicas and return_obj.status.readyReplicas != replicas:
            # Bug fix: this message previously formatted `status.ready_replicas`
            # (snake_case), which is not a field on the API object (the field is
            # `readyReplicas`, as used above), so the reported count was wrong.
            self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
                               "the wait_timeout period.".format(return_obj.status.readyReplicas, replicas))
        return return_obj.to_dict()

    def execute_module(self):
        """Create/update/delete the replica set and optionally wait for readiness."""
        # Parse parameters specific for this module:
        definition = virtdict()
        selector = self.params.get('selector')
        replicas = self.params.get('replicas')

        if selector:
            definition['spec']['selector'] = selector
        if replicas is not None:
            definition['spec']['replicas'] = replicas

        # Defaults for the VMI template embedded in the replica set:
        defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}

        # Execute the CRUD of the replica set:
        template = definition['spec']['template']
        dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
        result_crud = self.execute_crud(KIND, definition)
        changed = result_crud['changed']
        result = result_crud.pop('result')

        # When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
        # wait logic work correctly
        if changed and result_crud['method'] == 'create' and replicas is None:
            replicas = 1

        # Wait for the new number of ready replicas after a CRUD update
        # Note1: doesn't work correctly when reducing number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
        # Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
        #        achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
        if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
            result = self.wait_for_replicas(replicas)

        # Return from the module:
        self.exit_json(**{
            'changed': changed,
            'kubevirt_rs': result,
            'result': result_crud,
        })
def main():
    """Module entry point: run the replica-set module and convert any
    unhandled exception into an Ansible failure with a traceback."""
    vmirs_module = KubeVirtVMIRS()
    try:
        vmirs_module.execute_module()
    except Exception as exc:
        vmirs_module.fail_json(msg=str(exc), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| gpl-3.0 |
ThomasSweijen/TPF | doc/sphinx/conf.py | 1 | 28022 | # -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleand up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
# Record which Sphinx writer is being run ('latex', 'html' or 'epub') as a
# global in the __builtin__ module, so helper functions below (mkYrefNode)
# can build writer-specific link targets without threading the value through
# every call. Python 2 only (__builtin__ is `builtins` in Python 3).
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have either 'latex' or 'html' on the command line (hack for reference styles)")
def yaderef_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Handle the :yref:`` role by making a hyperlink to yade.wrapper.*.

    Supports the :yref:`Link text<link target>` syntax, like the usual
    hyperlinking roles. Returns a (nodes, messages) pair as required by
    the docutils role-function API.
    """
    # Extract the role payload from the raw text (the part between backticks).
    target = rawtext.split(':', 2)[2][1:-1]
    txt = target
    explicitText = False
    # Raw string fixes the invalid '\s' escape in the original pattern
    # (same regex, but no DeprecationWarning on newer Pythons); local also
    # renamed from `id`, which shadowed the builtin.
    m = re.match(r'(.*)\s*<(.*)>\s*', target)
    if m:
        explicitText = True
        txt, target = m.group(1), m.group(2)
    # Allow C++-style yade::Class::member references.
    target = target.replace('::', '.')
    return [mkYrefNode(target, txt, rawtext, role, explicitText, lineno, options)], []
def yadesrc_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Handle the :ysrc:`` role, making a hyperlink to the git repository
    webpage for the given path. Supports :ysrc:`Link text<file/name>` syntax,
    like the usual hyperlinking roles. If the target ends with ``/``, it is
    assumed to be a directory.
    """
    # Bug fix: the original signature omitted the `text` parameter, so the
    # positional arguments docutils passes (role, rawtext, text, lineno,
    # inliner) were bound to the wrong names; it only worked by accident
    # because the body reads `rawtext` alone.
    path = rawtext.split(':', 2)[2][1:-1]
    txt = path
    m = re.match(r'(.*)\s*<(.*)>\s*', path)
    if m:
        txt, path = m.group(1), m.group(2)
    ### **options should be passed to nodes.reference as well
    return [nodes.reference(rawtext, docutils.utils.unescape(txt), refuri='https://github.com/yade/trunk/blob/master/%s' % path)], []
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
# Keys are the real (implementation) module names; values are the documented
# module whose .html page contains the anchor.
moduleMap={
    'yade._packPredicates':'yade.pack',
    'yade._packSpheres':'yade.pack',
    'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
    """Sphinx cross-reference role that rewrites targets into the
    yade.wrapper namespace.

    NOTE(review): process_link prints every target (looks like a debugging
    leftover) and wraps the title in '[[...]]' -- presumably placeholder
    markup; confirm intent before relying on the rendered output.
    """
    #def process_link
    def process_link(self, env, refnode, has_explicit_title, title, target):
        # Python 2 print statement: debug trace of the resolved target.
        print 'TARGET:','yade.wrapper.'+target
        return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
    """Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute targets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.

    Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
    writer=__builtin__.writer # to make sure not shadowed by a local var
    import string
    if target.startswith('yade.'):
        # absolute target: first two dotted components name the module
        module='.'.join(target.split('.')[0:2])
        module2=(module if module not in moduleMap.keys() else moduleMap[module])
        if target==module: target='' # to reference the module itself
        # LaTeX links use the %page#anchor convention; HTML uses page.html#anchor
        uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
        if not explicitText and module!=module2:
            # show the documented (mapped) module name in the link text
            text=module2+'.'+'.'.join(target.split('.')[2:])
        text=string.replace(text,'yade.','',1)
    elif target.startswith('external:'):
        # 'external:foo' targets live on the external.html page
        exttarget=target.split(':',1)[1]
        if not explicitText: text=exttarget
        # bare names (no dot) are module anchors, prefixed 'module-' by sphinx
        target=exttarget if '.' in exttarget else 'module-'+exttarget
        uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
    else:
        # everything else (e.g. wrapped c++ classes) lives in yade.wrapper
        uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
    #print writer,uri
    if 0:
        # dead branch (disabled): would emit a pending_xref for sphinx to resolve
        refnode=addnodes.pending_xref(rawtext,reftype=role,refexplicit=explicitText,reftarget=target)
        #refnode.line=lineno
        #refnode+=nodes.literal(rawtext,text,classes=['ref',role])
        return [refnode],[]
        #ret.rawtext,reftype=role,
    else:
        # live path: emit a fully-resolved reference node directly
        return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
    #return [refnode],[]
def ydefault_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Expand the :ydefault:`something` role to nothing; fixSignature already
    renders the default value inside the member signature itself."""
    return ([], [])
def yattrtype_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Expand the :yattrtype:`something` role to nothing -- the attribute type
    is already shown in the member signature by fixSignature."""
    empty_nodes, empty_messages = [], []
    return empty_nodes, empty_messages
def yattrflags_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Swallow the :yattrflags:`something` role; fixSignature renders the
    flags inside the member signature itself.

    FIXME: should return a readable representation of the bits of the number
    (yade.wrapper.AttrFlags enum).
    """
    return [], []
from docutils.parsers.rst import roles

# Dispatch :yref: through sphinx's XRefRole machinery.
# NOTE(review): defined but never registered below -- yaderef_role is used
# instead; presumably an abandoned experiment, confirm before removing.
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)

# Register the custom yade roles with docutils so rst sources can use them.
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
# Substitutions appended to every processed rst file; used to tag attributes
# as auto-updated, auto-computed or static in the generated documentation.
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections

def customExclude(app, what, name, obj, skip, options):
    """autodoc-skip-member hook: decide which members to leave out of the docs.

    Returns True to skip the member, False to force-include it.
    Python 2 code: collections.Iterable moved to collections.abc in Python 3.
    """
    if name=='clone':
        # only document the Serializable.clone implementation, skip overrides
        if 'Serializable.clone' in str(obj): return False
        return True
    #escape crash on non iterable __doc__ in some qt object
    if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
    # members explicitly marked deprecated/hidden in their docstring are skipped
    if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
    #if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
    if name.startswith('_'):
        if name=='__init__':
            # skip boost classes with parameterless ctor (arg1=implicit self)
            if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
            # skip undocumented ctors
            if not obj.__doc__: return True
            # skip default ctor for serializable, taking dict of attrs
            if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
            #for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
            # any other documented ctor is kept
            return False
        # all other private/dunder members are skipped
        return True
    return False
def isBoostFunc(what, obj):
    """Return True when autodoc's `what` is 'function' and obj's repr marks
    it as a Boost.Python function object."""
    if what != 'function':
        return False
    return obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what, obj):
    """Heuristic: treat `obj` as a boost method when its repr looks like an
    unbound method ("I don't know how to distinguish boost and non-boost
    methods...")."""
    looks_unbound = obj.__repr__().startswith('<unbound method ') if what == 'method' else False
    return looks_unbound
def replaceLaTeX(s):
    """Convert inline LaTeX math in *s* to reST.

    Non-escaped ``$...$`` spans become ``:math:`...``` roles; escaped
    dollars (backslash-dollar) are then reduced to a literal ``$``.
    """
    mathified = re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$', r'\ :math:`\1`\ ', s)
    unescaped = re.sub(r'\\\$', r'$', mathified)
    return unescaped
def fixSrc(app,docname,source):
    """source-read hook: run the $...$ -> :math: replacement over the whole
    document text (source is a one-element list, mutated in place)."""
    source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
    """autodoc-process-docstring hook: clean up docstrings in place.

    Strips empty custom roles, replaces boost::python docstrings with their
    signature-stripped form, and converts $...$ LaTeX math to :math: roles.
    `lines` must be mutated element by element (autodoc requirement).
    """
    # remove empty default roles, which is not properly interpreted by docutils parser
    for i in range(0,len(lines)):
        lines[i]=lines[i].replace(':ydefault:``','')
        lines[i]=lines[i].replace(':yattrtype:``','')
        lines[i]=lines[i].replace(':yattrflags:``','')
        #lines[i]=re.sub(':``',':` `',lines[i])
    # remove signature of boost::python function docstring, which is the first line of the docstring
    if isBoostFunc(what,obj):
        l2=boostFuncSignature(name,obj)[1]
        # we must replace lines one by one (in-place) :-|
        # knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
        for i in range(0,len(lines)):
            lines[i]=l2[i] if i<len(l2) else ''
    elif isBoostMethod(what,obj):
        # same in-place replacement for boost methods
        l2=boostFuncSignature(name,obj)[1]
        for i in range(0,len(lines)):
            lines[i]=l2[i] if i<len(l2) else ''
    # LaTeX: replace $...$ by :math:`...`
    # must be done after calling boostFuncSignature which uses original docstring
    for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
    """Scan docstring of obj, returning tuple of properly formatted boost python signature
    (first line of the docstring) and the rest of docstring (as list of lines).
    The rest of docstring is stripped of 4 leading spaces which are automatically
    added by boost.
    removeSelf will attempt to remove the first argument from the signature.

    Returns (None, None) when *obj* has no docstring at all, and (None, lines)
    when the docstring does not look like a boost-generated signature.
    """
    doc=obj.__doc__
    if doc==None: # not a boost method
        return None,None
    nname=name.split('.')[-1]
    docc=doc.split('\n')
    if len(docc)<2: return None,docc
    # boost puts the signature on the second docstring line ("name(...) -> ret")
    doc1=docc[1]
    # functions with weird docstring, likely not documented by boost
    if not re.match('^'+nname+r'(.*)->.*$',doc1):
        return None,docc
    if doc1.endswith(':'): doc1=doc1[:-1]
    strippedDoc=doc.split('\n')[2:]
    # check if all lines are padded
    allLinesHave4LeadingSpaces=True
    for l in strippedDoc:
        if l.startswith('    '): continue
        allLinesHave4LeadingSpaces=False; break
    # remove the padding if so
    if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
    for i in range(len(strippedDoc)):
        # fix signatures inside docstring (one function with multiple signatures)
        strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
    # inspect dosctring after mangling
    # NOTE: the 'and False' guard below keeps this debug dump permanently off.
    if 'getViscoelasticFromSpheresInteraction' in name and False:
        print name
        print strippedDoc
        print '======================'
        for l in strippedDoc: print l
        print '======================'
    sig=doc1.split('(',1)[1]
    if removeSelf:
        # remove up to the first comma; if no comma present, then the method takes no arguments
        # if [ precedes the comma, add it to the result (ugly!)
        try:
            ss=sig.split(',',1)
            if ss[0].endswith('['): sig='['+ss[1]
            else: sig=ss[1]
        except IndexError:
            # grab the return value
            try:
                sig=') -> '+sig.split('->')[-1]
                #if 'Serializable' in name: print 1000*'#',name
            except IndexError:
                sig=')'
    return '('+sig,strippedDoc
def fixSignature(app, what, name, obj, options, signature, return_annotation):
    """Sphinx ``autodoc-process-signature`` handler.

    Returns a (signature, return_annotation) tuple: default-value annotation
    for attributes, an inheritance chain for classes, and a cleaned boost
    signature for wrapped functions/methods.  Falls through (implicit None)
    for anything else, leaving the signature unchanged.
    """
    #print what,name,obj,signature#,dir(obj)
    if what=='attribute':
        doc=unicode(obj.__doc__)
        ret=''
        m=re.match('.*:ydefault:`(.*?)`.*',doc)
        if m:
            typ=''
            #try:
            # clss='.'.join(name.split('.')[:-1])
            # instance=eval(clss+'()')
            # typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
            # if typ=='; NoneType': typ=''
            #except TypeError: ##no registered converted
            # typ=''
            dfl=m.group(1)
            # ((void)"comment",value) form: show as "value, comment"
            m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
            if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
            if dfl!='': ret+=' (='+dfl+'%s)'%typ
            else: ret+=' (=uninitalized%s)'%typ
        #m=re.match('.*\[(.{,8})\].*',doc)
        #m=re.match('.*:yunit:`(.?*)`.*',doc)
        #if m:
        # units=m.group(1)
        # print '@@@@@@@@@@@@@@@@@@@@@',name,units
        # ret+=' ['+units+']'
        return ret,None
    elif what=='class':
        ret=[]
        # walk the base-class chain until the Boost.Python root
        if len(obj.__bases__)>0:
            base=obj.__bases__[0]
            while base.__module__!='Boost.Python':
                ret+=[base.__name__]
                if len(base.__bases__)>0: base=base.__bases__[0]
                else: break
        if len(ret):
            return ' (inherits '+u' → '.join(ret)+')',None
        else: return None,None
    elif isBoostFunc(what,obj):
        sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
        return sig,None
    elif isBoostMethod(what,obj):
        sig=boostFuncSignature(name,obj,removeSelf=True)[0]
        return sig,None
    #else: print what,name,obj.__repr__()
    #return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
    """``parse_node`` callback for the custom ``ystaticattr`` description unit.

    *attr* is expected to look like ``Klass.name(=default)``.  The signature
    is decomposed into docutils nodes appended to *attrnode*, and the string
    ``Klass.name`` is returned as the cross-reference target.
    """
    m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
    if not m:
        print(100*'@'+' Static attribute %s not matched'%attr)
        attrnode+=addnodes.desc_name(attr,attr)
        # BUG FIX: the original fell through to m.groups() with m==None and
        # crashed with AttributeError; bail out with the raw text as target.
        return attr
    klass,name,default=m.groups()
    #attrnode+=addnodes.desc_type('static','static')
    attrnode+=addnodes.desc_name(name,name)
    plist=addnodes.desc_parameterlist()
    if default=='': default='unspecified'
    plist+=addnodes.desc_parameter('='+default,'='+default)
    attrnode+=plist
    attrnode+=addnodes.desc_annotation(' [static]',' [static]')
    return klass+'.'+name
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
    """Sphinx extension entry point for this conf.py.

    Installs 3-space-tab pygments lexers, connects the docstring/signature
    mangling handlers above, and registers the ``ystaticattr`` unit.
    """
    from sphinx.highlighting import lexers
    from pygments.lexers.compiled import CppLexer
    lexers['cpp'] = CppLexer(tabsize=3)
    lexers['c++'] = CppLexer(tabsize=3)
    from pygments.lexers.agile import PythonLexer
    lexers['python'] = PythonLexer(tabsize=3)
    app.connect('source-read',fixSrc)
    app.connect('autodoc-skip-member',customExclude)
    app.connect('autodoc-process-signature',fixSignature)
    app.connect('autodoc-process-docstring',fixDocstring)
    app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
# Select and configure the IPython console directive matching the installed
# IPython version, and teach it the custom "Yade [n]:" prompts.
if 1:
    if yade.runtime.ipython_version<12:
        import ipython_directive as id
    else:
        if 12<=yade.runtime.ipython_version<13:
            import ipython_directive012 as id
        elif 13<=yade.runtime.ipython_version<200:
            import ipython_directive013 as id
        else:
            import ipython_directive200 as id
    #The next four lines are for compatibility with IPython 0.13.1
    ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
    ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
    ipython_promptin ='Yade [%d]:'
    ipython_promptout=' -> [%d]: '
    ipython_cont_spaces=' '
    #For IPython <=0.12, the following lines are used
    id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
    id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
    id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
    id.fmtin ='Yade [%d]:'
    id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
    id.fmtcont=' .\D.: '
    id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
    if yade.runtime.ipython_version<12:
        id.reconfig_shell()
    import ipython_console_highlighting as ich
    # make the pygments console lexer recognize the Yade prompts as well
    ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
    ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
    ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
elif 13<=yade.runtime.ipython_version<200:
extensions.append('ipython_directive013')
else:
extensions.append('ipython_directive200')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Yade Documentation}}
\hypersetup{pdfauthor={V. Smilauer, E. Catalano, B. Chareyre, S. Dorofeenko, J. Duriez, A. Gladky, J. Kozicki, C. Modenese, L. Scholtes, L. Sibille, J. Stransky, K. Thoeni}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = yade.config.version
# The full version, including alpha/beta/rc tags.
release = yade.config.revision
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Yade Documentation}\\
\vspace{5 mm}
\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, Jerome Duriez, Anton Gladky, Janek Kozicki, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Klaus Thoeni
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
% Bottom of the page
\textit{\Large Release '''\
+yade.config.revision\
+r''', \today}
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\LARGE Authors}\\
\\
\text{\sffamily\bfseries\Large V\'{a}clav \v{S}milauer}\\
\text{\sffamily\Large Freelance consultant (http://woodem.eu)}\\
\\
\text{\sffamily\bfseries\Large Emanuele Catalano}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Bruno Chareyre}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Sergei Dorofeenko}\\
\text{\sffamily\Large IPCP RAS, Chernogolovka}\\
\\
\text{\sffamily\bfseries\Large Jerome Duriez}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Anton Gladky}\\
\text{\sffamily\Large TU Bergakademie Freiberg}\\
\\
\text{\sffamily\bfseries\Large Janek Kozicki}\\
\text{\sffamily\Large Gdansk University of Technology - lab. 3SR Grenoble University }\\
\\
\text{\sffamily\bfseries\Large Chiara Modenese}\\
\text{\sffamily\Large University of Oxford}\\
\\
\text{\sffamily\bfseries\Large Luc Scholt\`{e}s}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Luc Sibille}\\
\text{\sffamily\Large University of Nantes, lab. GeM}\\
\\
\text{\sffamily\bfseries\Large Jan Str\'{a}nsk\'{y}}\\
\text{\sffamily\Large CVUT Prague}\\
\\
\text{\sffamily\bfseries\Large Klaus Thoeni}
\text{\sffamily\Large The University of Newcastle (Australia)}\\
\text{\sffamily\bfseries\large Citing this document}\\
In order to let users cite Yade consistently in publications, we provide a list of bibliographic references for the different parts of the documentation. This way of acknowledging Yade is also a way to make developments and documentation of Yade more attractive for researchers, who are evaluated on the basis of citations of their work by others. We therefore kindly ask users to cite Yade as accurately as possible in their papers, as explained in http://yade-dem.org/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree', 'Yade.tex', u'Yade Documentation',
u'Václav Šmilauer', 'manual'),
('index-toctree_manuals', 'YadeManuals.tex', u'Yade Tutorial and Manuals',
u'Václav Šmilauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-2.0 |
TonnyXu/Zxing | cpp/scons/scons-time.py | 34 | 50485 | #!/usr/bin/env python
#
# scons-time - run SCons timings and collect statistics
#
# A script for running a configuration through SCons with a standard
# set of invocations to collect timing and memory statistics and to
# capture the results in a consistent set of output files for display
# and analysis.
#
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
from __future__ import nested_scopes
__revision__ = "src/script/scons-time.py 5023 2010/06/14 22:05:46 scons"
import getopt
import glob
import os
import re
import shutil
import sys
import tempfile
import time
# Compatibility shim: supply a sorted() for Pythons predating 2.4, emulating
# the key=/reverse= keywords via decorate-sort-undecorate.
try:
    sorted
except NameError:
    # Pre-2.4 Python has no sorted() function.
    #
    # The pre-2.4 Python list.sort() method does not support
    # list.sort(key=) nor list.sort(reverse=) keyword arguments, so
    # we must implement the functionality of those keyword arguments
    # by hand instead of passing them to list.sort().
    def sorted(iterable, cmp=None, key=None, reverse=False):
        if key is not None:
            # decorate: pair each item with its sort key
            result = [(key(x), x) for x in iterable]
        else:
            result = iterable[:]
        if cmp is None:
            # Pre-2.3 Python does not support list.sort(None).
            result.sort()
        else:
            result.sort(cmp)
        if key is not None:
            # undecorate: drop the keys again
            result = [t1 for t0,t1 in result]
        if reverse:
            result.reverse()
        return result
# Regression-test-only shim: shadow the callable() builtin to silence the
# Python 2.6/2.7 '-3' warning.  Guarded by an env var so it never runs in
# normal use.
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is not None:
    # We can't apply the 'callable' fixer until the floor is 2.6, but the
    # '-3' option to Python 2.6 and 2.7 generates almost ten thousand
    # warnings. This hack allows us to run regression tests with the '-3'
    # option by replacing the callable() built-in function with a hack
    # that performs the same function but doesn't generate the warning.
    # Note that this hack is ONLY intended to be used for regression
    # testing, and should NEVER be used for real runs.
    from types import ClassType
    def callable(obj):
        if hasattr(obj, '__call__'): return True
        if isinstance(obj, (ClassType, type)): return True
        return False
def make_temp_file(**kw):
    """Return a fresh temporary file name (resolved to a real path).

    Accepts the same keyword arguments as tempfile.mktemp().  Two fallbacks
    keep ancient Pythons working: a missing os.path.realpath(), and a
    mktemp() that rejects keywords (the 'prefix' keyword is then emulated by
    temporarily swapping tempfile.template).
    """
    try:
        name = tempfile.mktemp(**kw)
        try:
            name = os.path.realpath(name)
        except AttributeError:
            # Python 2.1 has no os.path.realpath() method.
            pass
    except TypeError:
        # mktemp() rejected the keyword arguments: emulate 'prefix' by hand.
        saved_template = tempfile.template
        try:
            tempfile.template = kw.pop('prefix')
            name = tempfile.mktemp(**kw)
        finally:
            tempfile.template = saved_template
    return name
def HACK_for_exec(cmd, *args):
    '''
    For some reason, Python won't allow an exec() within a function
    that also declares an internal function (including lambda functions).
    This function is a hack that calls exec() in a function with no
    internal functions.

    Optional args select the namespaces: none (caller-local), one (globals
    dict), or two (globals and locals dicts).  Uses the Python 2
    ``exec ... in ...`` statement form.
    '''
    if not args: exec(cmd)
    elif len(args) == 1: exec cmd in args[0]
    else: exec cmd in args[0], args[1]
class Plotter(object):
    """Shared arithmetic for choosing gridline spacing and graph ceilings."""
    def increment_size(self, largest):
        """
        Return the size of each horizontal increment line for a specified
        maximum value. This returns a value that will provide somewhere
        between 5 and 9 horizontal lines on the graph, on some set of
        boundaries that are multiples of 10/100/1000/etc.
        """
        step = largest // 5
        if not step:
            # Tiny maximum (< 5): one increment spans the whole graph.
            return largest
        scale = 1
        while step >= 10:
            step = step // 10
            scale = scale * 10
        return step * scale
    def max_graph_value(self, largest):
        """Return *largest* rounded up to the next whole increment boundary."""
        # Round up to next integer.
        ceiling = int(largest) + 1
        step = self.increment_size(ceiling)
        # integer ceiling-division, then back to a multiple of the step
        return ((ceiling + step - 1) // step) * step
class Line(object):
    """One plotted data series; knows how to emit itself in gnuplot syntax.

    All print_* methods write the gnuplot command stream to stdout.
    """
    def __init__(self, points, type, title, label, comment, fmt="%s %s"):
        self.points = points      # list of (x, y) pairs; y may be None for gaps
        self.type = type          # gnuplot line type (lt) number
        self.title = title        # legend title, or None for 'notitle'
        self.label = label        # optional text label drawn near the line
        self.comment = comment    # comment emitted above the data points
        self.fmt = fmt            # per-point output format
    def print_label(self, inx, x, y):
        """Emit a 'set label' command (label number *inx*) if this line has one."""
        if self.label:
            print 'set label %s "%s" at %s,%s right' % (inx, self.label, x, y)
    def plot_string(self):
        """Return this line's clause for the gnuplot 'plot' command."""
        if self.title:
            title_string = 'title "%s"' % self.title
        else:
            title_string = 'notitle'
        return "'-' %s with lines lt %s" % (title_string, self.type)
    def print_points(self, fmt=None):
        """Emit the inline data block ('-' data terminated by 'e')."""
        if fmt is None:
            fmt = self.fmt
        if self.comment:
            print '# %s' % self.comment
        for x, y in self.points:
            # If y is None, it usually represents some kind of break
            # in the line's index number. We might want to represent
            # this some way rather than just drawing the line straight
            # between the two points on either side.
            if not y is None:
                print fmt % (x, y)
        print 'e'
    def get_x_values(self):
        """Return all x coordinates of this line's points."""
        return [ p[0] for p in self.points ]
    def get_y_values(self):
        """Return all y coordinates of this line's points."""
        return [ p[1] for p in self.points ]
class Gnuplotter(Plotter):
    """Collects Line objects and emits a complete gnuplot script on stdout.

    The min/max accessors cache their result in an instance attribute on
    first use, so lines must not be added after the first query.
    """
    def __init__(self, title, key_location):
        self.lines = []
        self.title = title
        self.key_location = key_location
    def line(self, points, type, title=None, label=None, comment=None, fmt='%s %s'):
        """Add a data series; silently ignored when *points* is empty."""
        if points:
            line = Line(points, type, title, label, comment, fmt)
            self.lines.append(line)
    def plot_string(self, line):
        return line.plot_string()
    def vertical_bar(self, x, type, label, comment):
        """Add a full-height vertical marker at *x*, if x is within range."""
        if self.get_min_x() <= x and x <= self.get_max_x():
            points = [(x, 0), (x, self.max_graph_value(self.get_max_y()))]
            self.line(points, type, label, comment)
    def get_all_x_values(self):
        """All x values across every line, with None gap markers removed."""
        result = []
        for line in self.lines:
            result.extend(line.get_x_values())
        return [r for r in result if not r is None]
    def get_all_y_values(self):
        """All y values across every line, with None gap markers removed."""
        result = []
        for line in self.lines:
            result.extend(line.get_y_values())
        return [r for r in result if not r is None]
    def get_min_x(self):
        # cached on first call; 0 when there is no data at all
        try:
            return self.min_x
        except AttributeError:
            try:
                self.min_x = min(self.get_all_x_values())
            except ValueError:
                self.min_x = 0
            return self.min_x
    def get_max_x(self):
        # cached on first call; 0 when there is no data at all
        try:
            return self.max_x
        except AttributeError:
            try:
                self.max_x = max(self.get_all_x_values())
            except ValueError:
                self.max_x = 0
            return self.max_x
    def get_min_y(self):
        # cached on first call; 0 when there is no data at all
        try:
            return self.min_y
        except AttributeError:
            try:
                self.min_y = min(self.get_all_y_values())
            except ValueError:
                self.min_y = 0
            return self.min_y
    def get_max_y(self):
        # cached on first call; 0 when there is no data at all
        try:
            return self.max_y
        except AttributeError:
            try:
                self.max_y = max(self.get_all_y_values())
            except ValueError:
                self.max_y = 0
            return self.max_y
    def draw(self):
        """Print the whole gnuplot script: title, key, labels, plot, data."""
        if not self.lines:
            return
        if self.title:
            print 'set title "%s"' % self.title
        print 'set key %s' % self.key_location
        min_y = self.get_min_y()
        max_y = self.max_graph_value(self.get_max_y())
        # stack up to five label positions down the middle of the graph
        incr = (max_y - min_y) / 10.0
        start = min_y + (max_y / 2.0) + (2.0 * incr)
        position = [ start - (i * incr) for i in range(5) ]
        inx = 1
        for line in self.lines:
            line.print_label(inx, line.points[0][0]-1,
                             position[(inx-1) % len(position)])
            inx += 1
        plot_strings = [ self.plot_string(l) for l in self.lines ]
        print 'plot ' + ', \\\n     '.join(plot_strings)
        for line in self.lines:
            line.print_points()
def untar(fname):
    """Unpack the tar archive *fname* into the current directory."""
    import tarfile
    archive = tarfile.open(name=fname, mode='r')
    for member in archive:
        archive.extract(member)
    archive.close()
def unzip(fname):
    """Unpack the zip archive *fname* into the current directory.

    Intermediate directories are created as needed.  Fixes over the original:
    member data is written in binary mode ('wb', not 'w') so contents are
    reproduced byte-for-byte; the bare ``except: pass`` around makedirs() is
    replaced by an explicit existence check; file handles are closed.
    """
    import zipfile
    zf = zipfile.ZipFile(fname, 'r')
    try:
        for name in zf.namelist():
            dirname = os.path.dirname(name)
            # only create what is missing instead of swallowing every error
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            f = open(name, 'wb')
            try:
                f.write(zf.read(name))
            finally:
                f.close()
    finally:
        zf.close()
def read_tree(dir):
    """Read every regular file under *dir*, warming the OS file cache."""
    for dirpath, dirnames, filenames in os.walk(dir):
        for name in filenames:
            path = os.path.join(dirpath, name)
            if not os.path.isfile(path):
                continue
            open(path, 'rb').read()
def redirect_to_file(command, log):
    """Return a shell line running *command* with stdout+stderr sent to *log*."""
    template = '%s > %s 2>&1'
    return template % (command, log)
def tee_to_file(command, log):
    """Return a shell line running *command* with output both shown and
    captured in *log* via tee."""
    template = '%s 2>&1 | tee %s'
    return template % (command, log)
class SConsTimer(object):
"""
Usage: scons-time SUBCOMMAND [ARGUMENTS]
Type "scons-time help SUBCOMMAND" for help on a specific subcommand.
Available subcommands:
func Extract test-run data for a function
help Provides help
mem Extract --debug=memory data from test runs
obj Extract --debug=count data from test runs
time Extract --debug=time data from test runs
run Runs a test configuration
"""
name = 'scons-time'
name_spaces = ' '*len(name)
def makedict(**kw):
return kw
default_settings = makedict(
aegis = 'aegis',
aegis_project = None,
chdir = None,
config_file = None,
initial_commands = [],
key_location = 'bottom left',
orig_cwd = os.getcwd(),
outdir = None,
prefix = '',
python = '"%s"' % sys.executable,
redirect = redirect_to_file,
scons = None,
scons_flags = '--debug=count --debug=memory --debug=time --debug=memoizer',
scons_lib_dir = None,
scons_wrapper = None,
startup_targets = '--help',
subdir = None,
subversion_url = None,
svn = 'svn',
svn_co_flag = '-q',
tar = 'tar',
targets = '',
targets0 = None,
targets1 = None,
targets2 = None,
title = None,
unzip = 'unzip',
verbose = False,
vertical_bars = [],
unpack_map = {
'.tar.gz' : (untar, '%(tar)s xzf %%s'),
'.tgz' : (untar, '%(tar)s xzf %%s'),
'.tar' : (untar, '%(tar)s xf %%s'),
'.zip' : (unzip, '%(unzip)s %%s'),
},
)
run_titles = [
'Startup',
'Full build',
'Up-to-date build',
]
run_commands = [
'%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof0)s %(targets0)s',
'%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof1)s %(targets1)s',
'%(python)s %(scons_wrapper)s %(scons_flags)s --profile=%(prof2)s %(targets2)s',
]
stages = [
'pre-read',
'post-read',
'pre-build',
'post-build',
]
stage_strings = {
'pre-read' : 'Memory before reading SConscript files:',
'post-read' : 'Memory after reading SConscript files:',
'pre-build' : 'Memory before building targets:',
'post-build' : 'Memory after building targets:',
}
memory_string_all = 'Memory '
default_stage = stages[-1]
time_strings = {
'total' : 'Total build time',
'SConscripts' : 'Total SConscript file execution time',
'SCons' : 'Total SCons execution time',
'commands' : 'Total command execution time',
}
time_string_all = 'Total .* time'
#
    def __init__(self):
        """Seed the instance with a copy of the class-level default_settings,
        so option processing can later override them per run."""
        self.__dict__.update(self.default_settings)
# Functions for displaying and executing commands.
    def subst(self, x, dictionary):
        """Return *x* %-interpolated with *dictionary*, or *x* unchanged when
        it is not a format string (e.g. a Python function used as an action)."""
        try:
            return x % dictionary
        except TypeError:
            # x isn't a string (it's probably a Python function),
            # so just return it.
            return x
    def subst_variables(self, command, dictionary):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.
        The command can be an (action, string, args...) tuple. In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string. (It's probably a Python function to be executed.)

        Returns a normalized (action, display_string, args) tuple.
        """
        try:
            # EAFP type test: concatenation succeeds only for strings.
            command + ''
        except TypeError:
            action = command[0]
            string = command[1]
            args = command[2:]
        else:
            # plain string: it is both the action and its display text
            action = command
            string = action
            args = (())
        action = self.subst(action, dictionary)
        string = self.subst(string, dictionary)
        return (action, string, args)
    def _do_not_display(self, msg, *args):
        # No-op stand-in for display(); presumably bound over self.display to
        # silence progress output — confirm against option handling elsewhere.
        pass
    def display(self, msg, *args):
        """
        Displays the specified message.
        Each message is prepended with a standard prefix of our name
        plus the time.

        *msg* may be a callable (invoked with *args* to build the text,
        allowing deferred/expensive formatting) or a format string.  A None
        result suppresses output entirely.
        """
        if callable(msg):
            msg = msg(*args)
        else:
            msg = msg % args
        if msg is None:
            return
        fmt = '%s[%s]: %s\n'
        sys.stdout.write(fmt % (self.name, time.strftime('%H:%M:%S'), msg))
    def _do_not_execute(self, action, *args):
        # No-op stand-in for execute(); presumably bound over self.execute for
        # dry runs — confirm against option handling elsewhere.
        pass
def execute(self, action, *args):
"""
Executes the specified action.
The action is called if it's a callable Python function, and
otherwise passed to os.system().
"""
if callable(action):
action(*args)
else:
os.system(action % args)
    def run_command_list(self, commands, dict):
        """
        Executes a list of commands, substituting values from the
        specified dictionary.

        Each command is normalized via subst_variables(), displayed, then
        run; a truthy status aborts the whole run with that status.
        NOTE(review): this check only triggers if execute() actually returns
        the command's status — verify.
        """
        commands = [ self.subst_variables(c, dict) for c in commands ]
        for action, string, args in commands:
            self.display(string, *args)
            sys.stdout.flush()
            status = self.execute(action, *args)
            if status:
                sys.exit(status)
    def log_display(self, command, log):
        """Return the display form of *command*: interpolated with the
        instance settings and, when *log* is given, wrapped by the configured
        redirection (self.redirect)."""
        command = self.subst(command, self.__dict__)
        if log:
            command = self.redirect(command, log)
        return command
    def log_execute(self, command, log):
        """Run *command* (after interpolating instance settings), capture its
        combined output into the file *log*, and echo it when verbose.

        NOTE(review): os.popen().read() yields a str that is written with
        mode 'wb' — fine on Python 2, would need encoding on Python 3.
        """
        command = self.subst(command, self.__dict__)
        output = os.popen(command).read()
        if self.verbose:
            sys.stdout.write(output)
        open(log, 'wb').write(output)
#
    def archive_splitext(self, path):
        """
        Splits an archive name into a filename base and extension.
        This is like os.path.splitext() (which it calls) except that it
        also looks for '.tar.gz' and treats it as an atomic extensions.
        """
        # special-case the two-part extension before delegating
        if path.endswith('.tar.gz'):
            return path[:-7], path[-7:]
        else:
            return os.path.splitext(path)
def args_to_files(self, args, tail=None):
"""
Takes a list of arguments, expands any glob patterns, and
returns the last "tail" files from the list.
"""
files = []
for a in args:
files.extend(sorted(glob.glob(a)))
if tail:
files = files[-tail:]
return files
    def ascii_table(self, files, columns,
                    line_function, file_function=lambda x: x,
                    *args, **kw):
        """Print an ASCII table: a header row of *columns*, then one row
        per file whose cells come from line_function(file, ...) padded
        out to the column count, with file_function(file) appended as the
        trailing label column."""
        header_fmt = ' '.join(['%12s'] * len(columns))
        line_fmt = header_fmt + ' %s'
        print header_fmt % columns
        for file in files:
            t = line_function(file, *args, **kw)
            if t is None:
                t = []
            # Pad short rows with empty strings so the format applies.
            diff = len(columns) - len(t)
            if diff > 0:
                t += [''] * diff
            t.append(file_function(file))
            print line_fmt % tuple(t)
def collect_results(self, files, function, *args, **kw):
results = {}
for file in files:
base = os.path.splitext(file)[0]
run, index = base.split('-')[-2:]
run = int(run)
index = int(index)
value = function(file, *args, **kw)
try:
r = results[index]
except KeyError:
r = []
results[index] = r
r.append((run, value))
return results
def doc_to_help(self, obj):
"""
Translates an object's __doc__ string into help text.
This strips a consistent number of spaces from each line in the
help text, essentially "outdenting" the text to the left-most
column.
"""
doc = obj.__doc__
if doc is None:
return ''
return self.outdent(doc)
def find_next_run_number(self, dir, prefix):
"""
Returns the next run number in a directory for the specified prefix.
Examines the contents the specified directory for files with the
specified prefix, extracts the run numbers from each file name,
and returns the next run number after the largest it finds.
"""
x = re.compile(re.escape(prefix) + '-([0-9]+).*')
matches = [x.match(e) for e in os.listdir(dir)]
matches = [_f for _f in matches if _f]
if not matches:
return 0
run_numbers = [int(m.group(1)) for m in matches]
return int(max(run_numbers)) + 1
    def gnuplot_results(self, results, fmt='%s %.3f'):
        """
        Prints out a set of results in Gnuplot format.

        *results* is the index -> [(run, value), ...] mapping produced
        by collect_results().  Gnuplotter is defined elsewhere in this
        file; presumably it buffers lines/bars and emits a plot script
        on draw() -- confirm against its definition.
        """
        gp = Gnuplotter(self.title, self.key_location)
        for i in sorted(results.keys()):
            try:
                t = self.run_titles[i]
            except IndexError:
                # No configured title for this run index.
                t = '??? %s ???' % i
            results[i].sort()
            gp.line(results[i], i+1, t, None, t, fmt=fmt)
        for bar_tuple in self.vertical_bars:
            # Bars may be (x, type, label) or (x, type, label, comment).
            try:
                x, type, label, comment = bar_tuple
            except ValueError:
                x, type, label = bar_tuple
                comment = label
            gp.vertical_bar(x, type, label, comment)
        gp.draw()
def logfile_name(self, invocation):
"""
Returns the absolute path of a log file for the specificed
invocation number.
"""
name = self.prefix_run + '-%d.log' % invocation
return os.path.join(self.outdir, name)
def outdent(self, s):
"""
Strip as many spaces from each line as are found at the beginning
of the first line in the list.
"""
lines = s.split('\n')
if lines[0] == '':
lines = lines[1:]
spaces = re.match(' *', lines[0]).group(0)
def strip_initial_spaces(l, s=spaces):
if l.startswith(spaces):
l = l[len(spaces):]
return l
return '\n'.join([ strip_initial_spaces(l) for l in lines ]) + '\n'
def profile_name(self, invocation):
"""
Returns the absolute path of a profile file for the specified
invocation number.
"""
name = self.prefix_run + '-%d.prof' % invocation
return os.path.join(self.outdir, name)
def set_env(self, key, value):
os.environ[key] = value
#
def get_debug_times(self, file, time_string=None):
"""
Fetch times from the --debug=time strings in the specified file.
"""
if time_string is None:
search_string = self.time_string_all
else:
search_string = time_string
contents = open(file).read()
if not contents:
sys.stderr.write('file %s has no contents!\n' % repr(file))
return None
result = re.findall(r'%s: ([\d\.]*)' % search_string, contents)[-4:]
result = [ float(r) for r in result ]
if not time_string is None:
try:
result = result[0]
except IndexError:
sys.stderr.write('file %s has no results!\n' % repr(file))
return None
return result
    def get_function_profile(self, file, function):
        """
        Returns the file, line number, function name, and cumulative time
        for *function* from the pstats profile dump in *file*.

        Raises IndexError (via matches[0]) when the function does not
        appear in the profile; callers such as do_func() catch errors
        around this call.
        """
        try:
            import pstats
        except ImportError, e:
            # Some Python builds ship without the profiler modules.
            sys.stderr.write('%s: func: %s\n' % (self.name, e))
            sys.stderr.write('%s  This version of Python is missing the profiler.\n' % self.name_spaces)
            sys.stderr.write('%s  Cannot use the "func" subcommand.\n' % self.name_spaces)
            sys.exit(1)
        statistics = pstats.Stats(file).stats
        # Stats keys are (filename, lineno, funcname); match on funcname.
        matches = [ e for e in statistics.items() if e[0][2] == function ]
        r = matches[0]
        # r[1][3] is the cumulative-time slot of the stats value tuple.
        return r[0][0], r[0][1], r[0][2], r[1][3]
def get_function_time(self, file, function):
"""
Returns just the cumulative time for the specified function.
"""
return self.get_function_profile(file, function)[3]
def get_memory(self, file, memory_string=None):
"""
Returns a list of integers of the amount of memory used. The
default behavior is to return all the stages.
"""
if memory_string is None:
search_string = self.memory_string_all
else:
search_string = memory_string
lines = open(file).readlines()
lines = [ l for l in lines if l.startswith(search_string) ][-4:]
result = [ int(l.split()[-1]) for l in lines[-4:] ]
if len(result) == 1:
result = result[0]
return result
def get_object_counts(self, file, object_name, index=None):
"""
Returns the counts of the specified object_name.
"""
object_string = ' ' + object_name + '\n'
lines = open(file).readlines()
line = [ l for l in lines if l.endswith(object_string) ][0]
result = [ int(field) for field in line.split()[:4] ]
if index is not None:
result = result[index]
return result
#
    # Maps alternate subcommand spellings to their canonical do_* name.
    command_alias = {}
    def execute_subcommand(self, argv):
        """
        Executes the do_*() function for the specified subcommand (argv[0]),
        falling back to default() when no matching method exists.
        """
        if not argv:
            return
        cmdName = self.command_alias.get(argv[0], argv[0])
        try:
            func = getattr(self, 'do_' + cmdName)
        except AttributeError:
            return self.default(argv)
        try:
            return func(argv)
        except TypeError, e:
            # A TypeError here usually means bad arguments to the
            # subcommand; show the traceback plus a usage hint.
            sys.stderr.write("%s %s: %s\n" % (self.name, cmdName, e))
            import traceback
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("Try '%s help %s'\n" % (self.name, cmdName))
def default(self, argv):
"""
The default behavior for an unknown subcommand. Prints an
error message and exits.
"""
sys.stderr.write('%s: Unknown subcommand "%s".\n' % (self.name, argv[0]))
sys.stderr.write('Type "%s help" for usage.\n' % self.name)
sys.exit(1)
#
    def do_help(self, argv):
        """
        Prints help for each subcommand named in argv[1:] -- preferring a
        dedicated help_<cmd>() method over the do_<cmd>() docstring --
        or the class-level usage text when no subcommand is given.
        """
        if argv[1:]:
            for arg in argv[1:]:
                try:
                    func = getattr(self, 'do_' + arg)
                except AttributeError:
                    sys.stderr.write('%s: No help for "%s"\n' % (self.name, arg))
                else:
                    try:
                        help = getattr(self, 'help_' + arg)
                    except AttributeError:
                        # No help_* method; outdent the docstring instead.
                        sys.stdout.write(self.doc_to_help(func))
                        sys.stdout.flush()
                    else:
                        help()
        else:
            doc = self.doc_to_help(self.__class__)
            if doc:
                sys.stdout.write(doc)
                sys.stdout.flush()
        return None
#
    def help_func(self):
        # Usage text for the 'func' subcommand; outdent() strips the
        # source indentation before the text is written out.
        help = """\
        Usage: scons-time func [OPTIONS] FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          --func=NAME, --function=NAME  Report time for function NAME
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --title=TITLE                 Specify the output plot TITLE
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_func(self, argv):
        """
        'func' subcommand: report cumulative times for one function from
        a set of profile files, as ASCII text or Gnuplot input.
        """
        format = 'ascii'
        function_name = '_main'
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'func=',
            'function=',
            'help',
            'prefix=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('--func', '--function'):
                function_name = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'func'])
                sys.exit(0)
            elif o in ('--max',):
                # NOTE(review): '--max' is not in long_opts above, so
                # getopt can never produce it and max_time is dead code.
                max_time = int(a)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        if self.config_file:
            # Config files are trusted Python executed into our namespace.
            exec open(self.config_file, 'rU').read() in self.__dict__
        if self.chdir:
            os.chdir(self.chdir)
        if not args:
            # No explicit files: glob for this prefix's profile dumps.
            pattern = '%s*.prof' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: func: No arguments specified.\n' % self.name)
                sys.stderr.write('%s No %s*.prof files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s Type "%s help func" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            for file in args:
                try:
                    f, line, func, time = \
                        self.get_function_profile(file, function_name)
                except ValueError, e:
                    sys.stderr.write("%s: func: %s: %s\n" %
                                     (self.name, file, e))
                else:
                    # Print source paths relative to the current dir.
                    if f.startswith(cwd_):
                        f = f[len(cwd_):]
                    print "%.3f %s:%d(%s)" % (time, f, line, func)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_function_time,
                                           function_name)
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: func: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
#
    def help_mem(self):
        # Usage text for the 'mem' subcommand; outdent() strips the
        # source indentation before the text is written out.
        help = """\
        Usage: scons-time mem [OPTIONS] FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          --stage=STAGE                 Plot memory at the specified stage:
                                        pre-read, post-read, pre-build,
                                        post-build (default: post-build)
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --title=TITLE                 Specify the output plot TITLE
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_mem(self, argv):
        """
        'mem' subcommand: report memory usage recorded in log files, as
        an ASCII table (all stages) or Gnuplot input (one stage).
        """
        format = 'ascii'
        logfile_path = lambda x: x
        stage = self.default_stage
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'stage=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'mem'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--stage',):
                if not a in self.stages:
                    sys.stderr.write('%s: mem: Unrecognized stage "%s".\n' % (self.name, a))
                    sys.exit(1)
                stage = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        if self.config_file:
            # HACK_for_exec is defined elsewhere in this file; presumably
            # it exec's the config into our namespace like do_func does.
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            # No explicit files: glob for this prefix's log files.
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: mem: No arguments specified.\n' % self.name)
                sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s Type "%s help mem" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            self.ascii_table(args, tuple(self.stages), self.get_memory, logfile_path)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_memory,
                                           self.stage_strings[stage])
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: mem: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
        return 0
#
    def help_obj(self):
        # Usage text for the 'obj' subcommand; outdent() strips the
        # source indentation before the text is written out.
        help = """\
        Usage: scons-time obj [OPTIONS] OBJECT FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          --stage=STAGE                 Plot memory at the specified stage:
                                        pre-read, post-read, pre-build,
                                        post-build (default: post-build)
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --title=TITLE                 Specify the output plot TITLE
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_obj(self, argv):
        """
        'obj' subcommand: report the per-stage object counts recorded in
        log files for one object class, as ASCII or Gnuplot input.
        The first non-option argument is the object name.
        """
        format = 'ascii'
        logfile_path = lambda x: x
        stage = self.default_stage
        tail = None
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'stage=',
            'tail=',
            'title=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'obj'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--stage',):
                if not a in self.stages:
                    sys.stderr.write('%s: obj: Unrecognized stage "%s".\n' % (self.name, a))
                    sys.stderr.write('%s Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
                    sys.exit(1)
                stage = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
        if not args:
            sys.stderr.write('%s: obj: Must specify an object name.\n' % self.name)
            sys.stderr.write('%s Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
        # The object name is consumed before file arguments.
        object_name = args.pop(0)
        if self.config_file:
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: obj: No arguments specified.\n' % self.name)
                sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s Type "%s help obj" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            self.ascii_table(args, tuple(self.stages), self.get_object_counts, logfile_path, object_name)
        elif format == 'gnuplot':
            # Translate the stage name into its positional index.
            stage_index = 0
            for s in self.stages:
                if stage == s:
                    break
                stage_index = stage_index + 1
            results = self.collect_results(args, self.get_object_counts,
                                           object_name, stage_index)
            self.gnuplot_results(results)
        else:
            sys.stderr.write('%s: obj: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
        return 0
#
    def help_run(self):
        # Usage text for the 'run' subcommand; outdent() strips the
        # source indentation before the text is written out.
        help = """\
        Usage: scons-time run [OPTIONS] [FILE ...]
          --aegis=PROJECT               Use SCons from the Aegis PROJECT
          --chdir=DIR                   Name of unpacked directory for chdir
          -f FILE, --file=FILE          Read configuration from specified FILE
          -h, --help                    Print this help and exit
          -n, --no-exec                 No execute, just print command lines
          --number=NUMBER               Put output in files for run NUMBER
          --outdir=OUTDIR               Put output files in OUTDIR
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          --python=PYTHON               Time using the specified PYTHON
          -q, --quiet                   Don't print command lines
          --scons=SCONS                 Time using the specified SCONS
          --svn=URL, --subversion=URL   Use SCons from Subversion URL
          -v, --verbose                 Display output of commands
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_run(self, argv):
        """
        'run' subcommand: perform one or more timing runs over the given
        archives, optionally fetching SCons from Subversion or Aegis
        first.  Each requested run number is handled by individual_run().
        """
        run_number_list = [None]
        short_opts = '?f:hnp:qs:v'
        long_opts = [
            'aegis=',
            'file=',
            'help',
            'no-exec',
            'number=',
            'outdir=',
            'prefix=',
            'python=',
            'quiet',
            'scons=',
            'svn=',
            'subdir=',
            'subversion=',
            'verbose',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('--aegis',):
                self.aegis_project = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'run'])
                sys.exit(0)
            elif o in ('-n', '--no-exec'):
                # Dry run: swap in the no-op execute method.
                self.execute = self._do_not_execute
            elif o in ('--number',):
                run_number_list = self.split_run_numbers(a)
            elif o in ('--outdir',):
                self.outdir = a
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('--python',):
                self.python = a
            elif o in ('-q', '--quiet'):
                # Quiet: swap in the no-op display method.
                self.display = self._do_not_display
            elif o in ('-s', '--subdir'):
                self.subdir = a
            elif o in ('--scons',):
                self.scons = a
            elif o in ('--svn', '--subversion'):
                self.subversion_url = a
            elif o in ('-v', '--verbose'):
                self.redirect = tee_to_file
                self.verbose = True
                self.svn_co_flag = ''
        if not args and not self.config_file:
            sys.stderr.write('%s: run: No arguments or -f config file specified.\n' % self.name)
            sys.stderr.write('%s Type "%s help run" for help.\n' % (self.name_spaces, self.name))
            sys.exit(1)
        if self.config_file:
            # Config files are trusted Python executed into our namespace.
            exec open(self.config_file, 'rU').read() in self.__dict__
        if args:
            self.archive_list = args
        # Default the unpack subdir and output prefix from the first
        # archive's base name.
        archive_file_name = os.path.split(self.archive_list[0])[1]
        if not self.subdir:
            self.subdir = self.archive_splitext(archive_file_name)[0]
        if not self.prefix:
            self.prefix = self.archive_splitext(archive_file_name)[0]
        prepare = None
        if self.subversion_url:
            prepare = self.prep_subversion_run
        elif self.aegis_project:
            prepare = self.prep_aegis_run
        for run_number in run_number_list:
            self.individual_run(run_number, self.archive_list, prepare)
def split_run_numbers(self, s):
result = []
for n in s.split(','):
try:
x, y = n.split('-')
except ValueError:
result.append(int(n))
else:
result.extend(list(range(int(x), int(y)+1)))
return result
def scons_path(self, dir):
return os.path.join(dir, 'src', 'script', 'scons.py')
def scons_lib_dir_path(self, dir):
return os.path.join(dir, 'src', 'engine')
    def prep_aegis_run(self, commands, removals):
        """Queue the commands that fetch SCons from the configured Aegis
        project into a temp dir, and register that dir for removal.
        make_temp_file is defined elsewhere in this file.  The command
        strings use %(name)s placeholders filled in later from
        self.__dict__ by run_command_list()."""
        self.aegis_tmpdir = make_temp_file(prefix = self.name + '-aegis-')
        removals.append((shutil.rmtree, 'rm -rf %%s', self.aegis_tmpdir))
        self.aegis_parent_project = os.path.splitext(self.aegis_project)[0]
        self.scons = self.scons_path(self.aegis_tmpdir)
        self.scons_lib_dir = self.scons_lib_dir_path(self.aegis_tmpdir)
        commands.extend([
            'mkdir %(aegis_tmpdir)s',
            (lambda: os.chdir(self.aegis_tmpdir), 'cd %(aegis_tmpdir)s'),
            '%(aegis)s -cp -ind -p %(aegis_parent_project)s .',
            '%(aegis)s -cp -ind -p %(aegis_project)s -delta %(run_number)s .',
        ])
    def prep_subversion_run(self, commands, removals):
        """Queue the commands that check SCons out of Subversion into a
        temp dir, and register that dir for removal.  The command
        strings use %(name)s placeholders filled in later from
        self.__dict__ by run_command_list()."""
        self.svn_tmpdir = make_temp_file(prefix = self.name + '-svn-')
        removals.append((shutil.rmtree, 'rm -rf %%s', self.svn_tmpdir))
        self.scons = self.scons_path(self.svn_tmpdir)
        self.scons_lib_dir = self.scons_lib_dir_path(self.svn_tmpdir)
        commands.extend([
            'mkdir %(svn_tmpdir)s',
            '%(svn)s co %(svn_co_flag)s -r %(run_number)s %(subversion_url)s %(svn_tmpdir)s',
        ])
    def individual_run(self, run_number, archive_list, prepare=None):
        """
        Performs an individual run of the default SCons invocations.

        Builds a command list (unpack the archives into a temp dir, warm
        the file cache, run each configured SCons invocation logging to
        per-invocation files, then clean up) and executes it via
        run_command_list().  self.scons* settings mutated here are
        restored before returning.
        """
        commands = []
        removals = []
        if prepare:
            prepare(commands, removals)
        # Saved so repeated runs can re-derive defaults from orig_cwd.
        save_scons = self.scons
        save_scons_wrapper = self.scons_wrapper
        save_scons_lib_dir = self.scons_lib_dir
        if self.outdir is None:
            self.outdir = self.orig_cwd
        elif not os.path.isabs(self.outdir):
            self.outdir = os.path.join(self.orig_cwd, self.outdir)
        if self.scons is None:
            self.scons = self.scons_path(self.orig_cwd)
        if self.scons_lib_dir is None:
            self.scons_lib_dir = self.scons_lib_dir_path(self.orig_cwd)
        if self.scons_wrapper is None:
            self.scons_wrapper = self.scons
        if not run_number:
            run_number = self.find_next_run_number(self.outdir, self.prefix)
        self.run_number = str(run_number)
        self.prefix_run = self.prefix + '-%03d' % run_number
        if self.targets0 is None:
            self.targets0 = self.startup_targets
        if self.targets1 is None:
            self.targets1 = self.targets
        if self.targets2 is None:
            self.targets2 = self.targets
        self.tmpdir = make_temp_file(prefix = self.name + '-')
        commands.extend([
            'mkdir %(tmpdir)s',
            (os.chdir, 'cd %%s', self.tmpdir),
        ])
        for archive in archive_list:
            if not os.path.isabs(archive):
                archive = os.path.join(self.orig_cwd, archive)
            if os.path.isdir(archive):
                dest = os.path.split(archive)[1]
                commands.append((shutil.copytree, 'cp -r %%s %%s', archive, dest))
            else:
                # Unpack by extension when we know how, else just copy.
                suffix = self.archive_splitext(archive)[1]
                unpack_command = self.unpack_map.get(suffix)
                if not unpack_command:
                    dest = os.path.split(archive)[1]
                    commands.append((shutil.copyfile, 'cp %%s %%s', archive, dest))
                else:
                    commands.append(unpack_command + (archive,))
        commands.extend([
            (os.chdir, 'cd %%s', self.subdir),
        ])
        commands.extend(self.initial_commands)
        commands.extend([
            # Pre-read the whole tree so timings aren't skewed by a cold
            # file cache; read_tree is defined elsewhere in this file.
            (lambda: read_tree('.'),
            'find * -type f | xargs cat > /dev/null'),
            (self.set_env, 'export %%s=%%s',
            'SCONS_LIB_DIR', self.scons_lib_dir),
            '%(python)s %(scons_wrapper)s --version',
        ])
        index = 0
        for run_command in self.run_commands:
            setattr(self, 'prof%d' % index, self.profile_name(index))
            c = (
                self.log_execute,
                self.log_display,
                run_command,
                self.logfile_name(index),
            )
            commands.append(c)
            index = index + 1
        commands.extend([
            (os.chdir, 'cd %%s', self.orig_cwd),
        ])
        # Setting PRESERVE in the environment keeps the temp dirs around
        # for post-mortem inspection.
        if not os.environ.get('PRESERVE'):
            commands.extend(removals)
            commands.append((shutil.rmtree, 'rm -rf %%s', self.tmpdir))
        self.run_command_list(commands, self.__dict__)
        self.scons = save_scons
        self.scons_lib_dir = save_scons_lib_dir
        self.scons_wrapper = save_scons_wrapper
#
    def help_time(self):
        # Usage text for the 'time' subcommand; outdent() strips the
        # source indentation before the text is written out.
        help = """\
        Usage: scons-time time [OPTIONS] FILE [...]
          -C DIR, --chdir=DIR           Change to DIR before looking for files
          -f FILE, --file=FILE          Read configuration from specified FILE
          --fmt=FORMAT, --format=FORMAT Print data in specified FORMAT
          -h, --help                    Print this help and exit
          -p STRING, --prefix=STRING    Use STRING as log file/profile prefix
          -t NUMBER, --tail=NUMBER      Only report the last NUMBER files
          --which=TIMER                 Plot timings for TIMER: total,
                                        SConscripts, SCons, commands.
        """
        sys.stdout.write(self.outdent(help))
        sys.stdout.flush()
    def do_time(self, argv):
        """
        'time' subcommand: report the --debug=time timings recorded in
        log files, as an ASCII table (all timers) or Gnuplot input for
        one selected timer.
        """
        format = 'ascii'
        logfile_path = lambda x: x
        tail = None
        which = 'total'
        short_opts = '?C:f:hp:t:'
        long_opts = [
            'chdir=',
            'file=',
            'fmt=',
            'format=',
            'help',
            'prefix=',
            'tail=',
            'title=',
            'which=',
        ]
        opts, args = getopt.getopt(argv[1:], short_opts, long_opts)
        for o, a in opts:
            if o in ('-C', '--chdir'):
                self.chdir = a
            elif o in ('-f', '--file'):
                self.config_file = a
            elif o in ('--fmt', '--format'):
                format = a
            elif o in ('-?', '-h', '--help'):
                self.do_help(['help', 'time'])
                sys.exit(0)
            elif o in ('-p', '--prefix'):
                self.prefix = a
            elif o in ('-t', '--tail'):
                tail = int(a)
            elif o in ('--title',):
                self.title = a
            elif o in ('--which',):
                if not a in self.time_strings.keys():
                    sys.stderr.write('%s: time: Unrecognized timer "%s".\n' % (self.name, a))
                    sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
                    sys.exit(1)
                which = a
        if self.config_file:
            HACK_for_exec(open(self.config_file, 'rU').read(), self.__dict__)
        if self.chdir:
            os.chdir(self.chdir)
            logfile_path = lambda x: os.path.join(self.chdir, x)
        if not args:
            # No explicit files: glob for this prefix's log files.
            pattern = '%s*.log' % self.prefix
            args = self.args_to_files([pattern], tail)
            if not args:
                if self.chdir:
                    directory = self.chdir
                else:
                    directory = os.getcwd()
                sys.stderr.write('%s: time: No arguments specified.\n' % self.name)
                sys.stderr.write('%s No %s*.log files found in "%s".\n' % (self.name_spaces, self.prefix, directory))
                sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
                sys.exit(1)
        else:
            args = self.args_to_files(args, tail)
        cwd_ = os.getcwd() + os.sep
        if format == 'ascii':
            columns = ("Total", "SConscripts", "SCons", "commands")
            self.ascii_table(args, columns, self.get_debug_times, logfile_path)
        elif format == 'gnuplot':
            results = self.collect_results(args, self.get_debug_times,
                                           self.time_strings[which])
            self.gnuplot_results(results, fmt='%s %.6f')
        else:
            sys.stderr.write('%s: time: Unknown format "%s".\n' % (self.name, format))
            sys.exit(1)
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], 'h?V', ['help', 'version'])
ST = SConsTimer()
for o, a in opts:
if o in ('-?', '-h', '--help'):
ST.do_help(['help'])
sys.exit(0)
elif o in ('-V', '--version'):
sys.stdout.write('scons-time version\n')
sys.exit(0)
if not args:
sys.stderr.write('Type "%s help" for usage.\n' % ST.name)
sys.exit(1)
ST.execute_subcommand(args)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/pylearn2/testing/cost.py | 34 | 3350 | """ Simple costs to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from functools import wraps
from pylearn2.costs.cost import Cost
from pylearn2.space import NullSpace
from pylearn2.utils import CallbackOp
from pylearn2.utils import safe_zip
from pylearn2.utils.data_specs import DataSpecsMapping
class CallbackCost(Cost):
    """
    A Cost that runs callbacks on the data. Returns the sum of the data
    multiplied by the sum of all model parameters as the cost. The callback is
    run via the CallbackOp so the cost must be used to compute one of the
    outputs of your theano graph if you want the callback to get called. The
    cost is designed so that the SGD algorithm will result in the
    CallbackOp getting evaluated.
    Parameters
    ----------
    data_callbacks : optional, callbacks to run on data.
    It is either a Python callable, or a tuple (possibly nested),
    in the same format as data_specs.
    data_specs : (space, source) pair specifying the format
    and label associated to the data.
    """
    def __init__(self, data_callbacks, data_specs):
        self.data_callbacks = data_callbacks
        self.data_specs = data_specs
        # Used to flatten (possibly nested) callback/data tuples into
        # parallel flat tuples.
        self._mapping = DataSpecsMapping(data_specs)
    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        return self.data_specs
    @wraps(Cost.expr)
    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        callbacks = self.data_callbacks
        cb_tuple = self._mapping.flatten(callbacks, return_tuple=True)
        data_tuple = self._mapping.flatten(data, return_tuple=True)
        costs = []
        for (callback, data_var) in safe_zip(cb_tuple, data_tuple):
            orig_var = data_var
            # Wrap the variable so the callback fires whenever this node
            # is evaluated in the compiled graph.
            data_var = CallbackOp(callback)(data_var)
            assert len(data_var.owner.inputs) == 1
            assert orig_var is data_var.owner.inputs[0]
            costs.append(data_var.sum())
        # sum() will call theano.add on the symbolic variables
        cost = sum(costs)
        # Tying the cost to the parameter sum keeps the CallbackOp in the
        # graph SGD evaluates (see class docstring).
        model_terms = sum([param.sum() for param in model.get_params()])
        cost = cost * model_terms
        return cost
class SumOfParams(Cost):
    """
    A cost equal to the sum of every model parameter, so the gradient
    with respect to each parameter is 1.
    """
    @wraps(Cost.expr)
    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        param_sums = [param.sum() for param in model.get_params()]
        return sum(param_sums)
    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        # This cost consumes no data at all.
        return (NullSpace(), '')
class SumOfOneHalfParamsSquared(Cost):
    """
    A cost equal to 0.5 * the sum of all squared parameters, so the
    gradient with respect to each parameter is the parameter itself.
    """
    @wraps(Cost.expr)
    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        squared_sums = [(param ** 2).sum() for param in model.get_params()]
        return 0.5 * sum(squared_sums)
    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        # This cost consumes no data at all.
        return (NullSpace(), '')
| bsd-3-clause |
2ndQuadrant/ansible | test/units/modules/network/cnos/test_cnos_command.py | 45 | 4330 | # Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_command
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosCommandModule(TestCnosModule):
    """Unit tests for the cnos_command module: run_commands is patched to
    replay canned device output from fixture files named after each
    command (spaces replaced by underscores)."""
    module = cnos_command
    def setUp(self):
        super(TestCnosCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.cnos.cnos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()
    def tearDown(self):
        super(TestCnosCommandModule, self).tearDown()
        self.mock_run_commands.stop()
    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    command = item
                except ValueError:
                    command = 'show version'
                # Fixture file name mirrors the command text.
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file
    def test_cnos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Lenovo Networking Operating System (NOS) Software'))
    def test_cnos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show running-config']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Lenovo Networking Operating System (NOS) Software'))
    def test_cnos_command_wait_for(self):
        wait_for = 'result[0] contains "Lenovo Networking Operating System (NOS) Software"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()
    def test_cnos_command_wait_for_fails(self):
        # Unsatisfiable wait_for exhausts the default 10 retries.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)
    def test_cnos_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)
    def test_cnos_command_match_any(self):
        wait_for = ['result[0] contains "Lenovo Networking Operating System (NOS) Software"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()
    def test_cnos_command_match_all(self):
        wait_for = ['result[0] contains "Lenovo Networking Operating System (NOS) Software"',
                    'result[0] contains "Lenovo"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()
    def test_cnos_command_match_all_failure(self):
        wait_for = ['result[0] contains "Lenovo ENOS"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show run']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
tectronics/rebuildingtogethercaptain | rest/__init__.py | 2 | 112205 | #!/usr/bin/env python
#
# Copyright (c) 2012 Boomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Rest handler for appengine Models.
To use with an existing application:
import rest
# add a handler for REST calls
application = webapp.WSGIApplication([
<... existing webservice urls ...>
('/rest/.*', rest.Dispatcher)
], ...)
# configure the rest dispatcher to know what prefix to expect on request
# urls
rest.Dispatcher.base_url = '/rest'
# add all models from the current module, and/or...
rest.Dispatcher.add_models_from_module(__name__)
# add all models from some other module, and/or...
rest.Dispatcher.add_models_from_module(my_model_module)
# add specific models (with given names)
rest.Dispatcher.add_models({
'foo' : FooModel,
'bar' : BarModel})
# add specific models (with given names) and restrict the supported methods
rest.Dispatcher.add_models({
'foo' : (FooModel, rest.READ_ONLY_MODEL_METHODS),
'bar' : (BarModel, ['GET_METADATA', 'GET', 'POST', 'PUT'],
'cache' : (CacheModel, ['GET', 'DELETE'] })
# use custom authentication/authorization
rest.Dispatcher.authenticator = MyAuthenticator()
rest.Dispatcher.authorizer = MyAuthorizer()
"""
import types
import logging
import re
import base64
import cgi
import pickle
import os
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import blobstore
from google.appengine.api import namespace_manager
from google.appengine.ext.db import metadata
from xml.dom import minidom
from datetime import datetime
# use faster json if available
try:
import json
except ImportError:
from django.utils import simplejson as json
# compatibility w/ python27 & webapp2
try:
# we only get the _real_ webapp2 in python27
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
import webapp2 as webapp
else:
raise ImportError("no webapp2 available")
COMPAT_WEBAPP2 = True
except ImportError:
from google.appengine.ext import webapp
COMPAT_WEBAPP2 = False
def get_instance_type_name(value):
    """Return the name of the concrete type of *value* (e.g. "int" for 3)."""
    value_type = type(value)
    return value_type.__name__
def get_type_name(value_type):
    """Return the unqualified name of the given type object."""
    name = value_type.__name__
    return name
# URL path components with special meaning to the dispatcher
METADATA_PATH = "metadata"
CONTENT_PATH = "content"
BLOBUPLOADRESULT_PATH = "__blob_result"
# maximum fetch page size
MAX_FETCH_PAGE_SIZE = 1000
# xml element name cleansing: prefix a leading digit with '_' ...
XML_CLEANSE_PATTERN1 = re.compile(r"^(\d)")
XML_CLEANSE_REPL1 = r"_\1"
# ... and replace any other character outside [a-zA-Z0-9_-] with '_'
XML_CLEANSE_PATTERN2 = re.compile(r"[^a-zA-Z0-9_\-]")
XML_CLEANSE_REPL2 = r"_"
# unique sentinel objects, compared by identity
EMPTY_VALUE = object()
MULTI_UPDATE_KEY = object()
# name/type used for the synthetic primary-key pseudo property
KEY_PROPERTY_NAME = "key"
KEY_PROPERTY_TYPE_NAME = "KeyProperty"
# datastore's special field name for key queries
KEY_QUERY_FIELD = "__key__"
# xml element and attribute names used in the wire format
TYPES_EL_NAME = "types"
TYPE_EL_NAME = "type"
LIST_EL_NAME = "list"
TYPE_ATTR_NAME = "type"
NAME_ATTR_NAME = "name"
BASE_ATTR_NAME = "base"
ETAG_ATTR_NAME = "etag"
PROPERTY_ATTR_NAME = "property"
REQUIRED_ATTR_NAME = "required"
DEFAULT_ATTR_NAME = "default"
INDEXED_ATTR_NAME = "indexed"
MULTILINE_ATTR_NAME = "multiline"
VERBOSENAME_ATTR_NAME = "verbose_name"
REFERENCECLASS_ATTR_NAME = "reference_class"
MODELNS_ATTR_NAME = "model_ns"
ITEM_EL_NAME = "item"
DATA_TYPE_SEPARATOR = ":"
# iso-8601 style date/time formats (microseconds handled separately)
DATE_TIME_SEP = "T"
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT_NO_MS = "%H:%M:%S"
DATE_TIME_FORMAT_NO_MS = DATE_FORMAT + DATE_TIME_SEP + TIME_FORMAT_NO_MS
# boolean wire values
TRUE_VALUE = "true"
TRUE_NUMERIC_VALUE = "1"
# http headers and content types
CONTENT_TYPE_HEADER = "Content-Type"
XML_CONTENT_TYPE = "application/xml"
TEXT_CONTENT_TYPE = "text/plain"
JSON_CONTENT_TYPE = "application/json"
METHOD_OVERRIDE_HEADER = "X-HTTP-Method-Override"
RANGE_HEADER = "Range"
BINARY_CONTENT_TYPE = "application/octet-stream"
FORMDATA_CONTENT_TYPE = "multipart/form-data"
ETAG_HEADER = "ETag"
# json representation of xml text nodes and attributes
JSON_TEXT_KEY = "#text"
JSON_ATTR_PREFIX = "@"
XML_ENCODING = "utf-8"
# XML Schema (xsd) element/attribute names, all under the "xs" prefix
XSD_PREFIX = "xs"
XSD_ATTR_XMLNS = "xmlns:" + XSD_PREFIX
XSD_NS = "http://www.w3.org/2001/XMLSchema"
XSD_SCHEMA_NAME = XSD_PREFIX + ":schema"
XSD_ELEMENT_NAME = XSD_PREFIX + ":element"
XSD_ATTRIBUTE_NAME = XSD_PREFIX + ":attribute"
XSD_COMPLEXTYPE_NAME = XSD_PREFIX + ":complexType"
XSD_SIMPLECONTENT_NAME = XSD_PREFIX + ":simpleContent"
XSD_EXTENSION_NAME = XSD_PREFIX + ":extension"
XSD_SEQUENCE_NAME = XSD_PREFIX + ":sequence"
XSD_ANY_NAME = XSD_PREFIX + ":any"
XSD_ANNOTATION_NAME = XSD_PREFIX + ":annotation"
XSD_APPINFO_NAME = XSD_PREFIX + ":appinfo"
XSD_DOCUMENTATION_NAME = XSD_PREFIX + ":documentation"
# Boomi connector filter-annotation names, under the "bm" prefix
XSD_FILTER_PREFIX = "bm"
XSD_ATTR_FILTER_XMLNS = "xmlns:" + XSD_FILTER_PREFIX
XSD_FILTER_NS = "http://www.boomi.com/connector/annotation"
XSD_FILTER_NAME = XSD_FILTER_PREFIX + ":filter"
XSD_ATTR_MINOCCURS = "minOccurs"
XSD_ATTR_MAXOCCURS = "maxOccurs"
XSD_ATTR_NAMESPACE = "namespace"
XSD_ATTR_PROCESSCONTENTS = "processContents"
XSD_ATTR_NOFILTER = "ignore"
XSD_ANY_NAMESPACE = "##any"
XSD_LAX_CONTENTS = "lax"
# occurrence bounds used when emitting schema elements
XSD_NO_MIN = "0"
XSD_SINGLE_MAX = "1"
XSD_NO_MAX = "unbounded"
XSD_NORMAL_STR = XSD_PREFIX + ":normalizedString"
BLOBINFO_TYPE_NAME = "BlobInfo"
# rest metadata annotation names, under the "rest" prefix
REST_MD_URI = "uri:com.boomi.rest"
REST_MD_PREFIX = "rest"
REST_MD_XMLNS = "xmlns:" + REST_MD_PREFIX
REST_MD_METADATA_NAME = REST_MD_PREFIX + ":metadata"
REST_MD_CHOICES_NAME = REST_MD_PREFIX + ":choices"
REST_MD_CHOICE_NAME = REST_MD_PREFIX + ":choice"
REST_MD_DEFAULT_NAME = REST_MD_PREFIX + ":default"
REST_MD_ITEM_NAME = REST_MD_PREFIX + ":item"
# method sets which may be passed to Dispatcher.add_models()
ALL_MODEL_METHODS = frozenset(["GET", "POST", "PUT", "DELETE", "GET_METADATA"])
READ_ONLY_MODEL_METHODS = frozenset(["GET", "GET_METADATA"])
# extension namespace sets controlling read/write visibility
READ_EXT_NS = "READ"
WRITE_EXT_NS = "WRITE"
HIDDEN_EXT_NAMESPACES = frozenset([])
READ_ONLY_EXT_NAMESPACES = frozenset([READ_EXT_NS])
FULL_EXT_NAMESPACES = frozenset([READ_EXT_NS, WRITE_EXT_NS])
# request query-string parameter names
QUERY_OFFSET_PARAM = "offset"
QUERY_CURSOR_PREFIX = "c_"
QUERY_PAGE_SIZE_PARAM = "page_size"
QUERY_ORDERING_PARAM = "ordering"
# filter params look like "f<op>_<propname>", e.g. "feq_title"
QUERY_TERM_PATTERN = re.compile(r"^(f.._)(.+)$")
# GQL fragments used to assemble the WHERE/ORDER BY clause
QUERY_PREFIX = "WHERE "
QUERY_JOIN = " AND "
QUERY_ORDERBY = " ORDER BY "
QUERY_ORDER_SUFFIXES = [" ASC", " DESC"]
QUERY_ORDER_PREFIXES = ["", "-"]
QUERY_ORDER_ASC_IDX = 0
QUERY_ORDER_DSC_IDX = 1
QUERY_LIST_TYPE = "fin_"
QUERY_TYPE_PARAM = "type"
QUERY_TYPE_FULL = "full"
# deprecated value, (really means xml or json depending on headers, use
# "structured" instead)
QUERY_TYPE_XML = "xml"
QUERY_TYPE_STRUCTURED = "structured"
QUERY_BLOBINFO_PARAM = "blobinfo"
QUERY_BLOBINFO_TYPE_KEY = "key"
QUERY_BLOBINFO_TYPE_INFO = "info"
QUERY_CALLBACK_PARAM = "callback"
QUERY_INCLUDEPROPS_PARAM = "include_props"
# params which are recognized but are not query filters
EXTRA_QUERY_PARAMS = frozenset([QUERY_BLOBINFO_PARAM, QUERY_CALLBACK_PARAM,
                                QUERY_INCLUDEPROPS_PARAM])
# filter-prefix -> GQL comparison template ("%s" = field, ":%d" = bind slot)
QUERY_EXPRS = {
    "feq_": "%s = :%d",
    "flt_": "%s < :%d",
    "fgt_": "%s > :%d",
    "fle_": "%s <= :%d",
    "fge_": "%s >= :%d",
    "fne_": "%s != :%d",
    QUERY_LIST_TYPE: "%s IN :%d"}
# python data-type name -> db property class (used for dynamic/list props)
DATA_TYPE_TO_PROPERTY_TYPE = {
    "basestring": db.StringProperty,
    "str": db.StringProperty,
    "unicode": db.StringProperty,
    "bool": db.BooleanProperty,
    "int": db.IntegerProperty,
    "long": db.IntegerProperty,
    "float": db.FloatProperty,
    "Key": db.ReferenceProperty,
    "datetime": db.DateTimeProperty,
    "date": db.DateProperty,
    "time": db.TimeProperty,
    "Blob": db.BlobProperty,
    "BlobKey": blobstore.BlobReferenceProperty,
    "ByteString": db.ByteStringProperty,
    "Text": db.TextProperty,
    "User": db.UserProperty,
    "Category": db.CategoryProperty,
    "Link": db.LinkProperty,
    "Email": db.EmailProperty,
    "GeoPt": db.GeoPtProperty,
    "IM": db.IMProperty,
    "PhoneNumber": db.PhoneNumberProperty,
    "PostalAddress": db.PostalAddressProperty,
    "Rating": db.RatingProperty,
    "list": db.ListProperty,
    "tuple": db.ListProperty}
# db property type name -> xsd type emitted in generated schemas
PROPERTY_TYPE_TO_XSD_TYPE = {
    get_type_name(db.StringProperty): XSD_PREFIX + ":string",
    get_type_name(db.BooleanProperty): XSD_PREFIX + ":boolean",
    get_type_name(db.IntegerProperty): XSD_PREFIX + ":long",
    get_type_name(db.FloatProperty): XSD_PREFIX + ":double",
    get_type_name(db.ReferenceProperty): XSD_NORMAL_STR,
    get_type_name(db.DateTimeProperty): XSD_PREFIX + ":dateTime",
    get_type_name(db.DateProperty): XSD_PREFIX + ":date",
    get_type_name(db.TimeProperty): XSD_PREFIX + ":time",
    get_type_name(db.BlobProperty): XSD_PREFIX + ":base64Binary",
    get_type_name(db.ByteStringProperty): XSD_PREFIX + ":base64Binary",
    get_type_name(blobstore.BlobReferenceProperty): BLOBINFO_TYPE_NAME,
    get_type_name(db.TextProperty): XSD_PREFIX + ":string",
    get_type_name(db.UserProperty): XSD_NORMAL_STR,
    get_type_name(db.CategoryProperty): XSD_NORMAL_STR,
    get_type_name(db.LinkProperty): XSD_PREFIX + ":anyURI",
    get_type_name(db.EmailProperty): XSD_NORMAL_STR,
    get_type_name(db.GeoPtProperty): XSD_NORMAL_STR,
    get_type_name(db.IMProperty): XSD_NORMAL_STR,
    get_type_name(db.PhoneNumberProperty): XSD_NORMAL_STR,
    get_type_name(db.PostalAddressProperty): XSD_NORMAL_STR,
    get_type_name(db.RatingProperty): XSD_PREFIX + ":integer",
    KEY_PROPERTY_TYPE_NAME: XSD_NORMAL_STR}
# db property type name -> python coercion applied to json output values
PROPERTY_TYPE_TO_JSON_TYPE = {
    get_type_name(db.IntegerProperty): long,
    get_type_name(db.FloatProperty): float,
    get_type_name(db.RatingProperty): long}
class KeyPseudoType(object):
    """Stand-in property descriptor for the synthetic 'key' property.

    Mimics the subset of the property interface that the handlers and
    metadata writers read: verbose_name, default, required, choices,
    indexed, name, and empty().
    """
    def __init__(self):
        self.verbose_name = None
        self.default = None
        self.choices = None
        self.required = True
        self.indexed = True
        self.name = KEY_PROPERTY_NAME
    def empty(self, value):
        """A key value counts as empty exactly when it is falsy."""
        if value:
            return False
        return True
# shared singleton pseudo-property describing the synthetic "key" property
KEY_PROPERTY_TYPE = KeyPseudoType()
def parse_date_time(dt_str, dt_format, dt_type, allows_microseconds):
    """Parse dt_str with dt_format and return a datetime/date/time.

    When allows_microseconds is true, an optional trailing ".<fraction>"
    (split at the last '.') is re-applied as microseconds, padded or
    truncated to six digits.  dt_type selects the final conversion:
    datetime.date or datetime.time extract that component, anything
    else yields the full datetime.
    """
    fraction = None
    if allows_microseconds:
        base, sep, frac_str = dt_str.rpartition(".")
        if sep:
            dt_str = base
            fraction = int(frac_str.ljust(6, "0")[:6])
    parsed = datetime.strptime(dt_str, dt_format)
    if fraction:
        parsed = parsed.replace(microsecond=fraction)
    if dt_type is datetime.date:
        return parsed.date()
    if dt_type is datetime.time:
        return parsed.time()
    return parsed
def convert_to_valid_xml_name(name):
    """Coerce an arbitrary string into a legal xml element name.

    A leading digit gets an '_' prefix; every character outside
    [a-zA-Z0-9_-] is replaced with '_'.
    """
    name = XML_CLEANSE_PATTERN1.sub(XML_CLEANSE_REPL1, name)
    return XML_CLEANSE_PATTERN2.sub(XML_CLEANSE_REPL2, name)
def append_child(parent_el, name, content=None, meta=None):
    """Create element <name> under parent_el and return it.

    When content is given, a text node is appended to the new element;
    when meta is also given it is attached to that text node as
    'disp_meta_' so later serialization can recover the property type.

    Fix: the original set txt_node.disp_meta_ outside the `if content:`
    guard, so calling with meta but no content raised an UnboundLocalError
    on txt_node; meta is now only applied when a text node exists.
    """
    doc = parent_el.ownerDocument
    elm = doc.createElement(name)
    parent_el.appendChild(elm)
    if content:
        txt_node = doc.createTextNode(content)
        elm.appendChild(txt_node)
        if meta:
            txt_node.disp_meta_ = meta
    return elm
def xsd_append_sequence(parent_el):
    """Append an xs:complexType/xs:sequence pair under parent_el and
    return the inner sequence element."""
    complex_el = append_child(parent_el, XSD_COMPLEXTYPE_NAME)
    return append_child(complex_el, XSD_SEQUENCE_NAME)
def xsd_append_nofilter(annotation_el):
    """Append the Boomi 'ignore filter' appinfo annotation (marking a
    property as unusable in query filters) and return the filter element."""
    appinfo_el = append_child(annotation_el, XSD_APPINFO_NAME)
    filter_el = append_child(appinfo_el, XSD_FILTER_NAME)
    filter_el.attributes[XSD_ATTR_FILTER_XMLNS] = XSD_FILTER_NS
    filter_el.attributes[XSD_ATTR_NOFILTER] = TRUE_VALUE
    return filter_el
def xsd_append_rest_metadata(annotation_el):
    """Append an xs:appinfo/rest:metadata pair (with the rest namespace
    declared) and return the metadata element."""
    appinfo_el = append_child(annotation_el, XSD_APPINFO_NAME)
    md_el = append_child(appinfo_el, REST_MD_METADATA_NAME)
    md_el.attributes[REST_MD_XMLNS] = REST_MD_URI
    return md_el
def xsd_append_choices(parent_el, prop_handler, choices):
    """Append a rest:choices element listing every allowed value (each
    serialized via prop_handler) and return it."""
    choices_el = append_child(parent_el, REST_MD_CHOICES_NAME)
    for choice in choices:
        choice_str = prop_handler.value_to_string(choice)
        append_child(choices_el, REST_MD_CHOICE_NAME, choice_str)
    return choices_el
def xsd_append_element(parent_el, name, prop_type_name, min_occurs,
                       max_occurs):
    """Append an xs:element with the given name and occurrence bounds.

    The xsd type attribute is only written when prop_type_name maps to a
    known xsd type; the occurrence attributes are only written when the
    corresponding argument is not None.  Returns the new element.
    """
    element_el = append_child(parent_el, XSD_ELEMENT_NAME)
    element_el.attributes[NAME_ATTR_NAME] = name
    xsd_type = PROPERTY_TYPE_TO_XSD_TYPE.get(prop_type_name)
    if xsd_type:
        element_el.attributes[TYPE_ATTR_NAME] = xsd_type
    if min_occurs is not None:
        element_el.attributes[XSD_ATTR_MINOCCURS] = min_occurs
    if max_occurs is not None:
        element_el.attributes[XSD_ATTR_MAXOCCURS] = max_occurs
    return element_el
def xsd_append_attribute(parent_el, name, prop_type_name, def_type=None):
    """Append an xs:attribute with the given name; the type falls back to
    def_type when prop_type_name has no xsd mapping.  Returns the new
    attribute element."""
    attr_el = append_child(parent_el, XSD_ATTRIBUTE_NAME)
    attr_el.attributes[NAME_ATTR_NAME] = name
    xsd_type = PROPERTY_TYPE_TO_XSD_TYPE.get(prop_type_name, def_type)
    if xsd_type:
        attr_el.attributes[TYPE_ATTR_NAME] = xsd_type
    return attr_el
def get_node_text(node_list, do_strip=False):
    """Concatenate the text nodes found in node_list.

    Returns None for an empty list; otherwise the joined text, optionally
    stripped of surrounding whitespace.
    """
    if not node_list:
        return None
    pieces = [node.data for node in node_list
              if node.nodeType == node.TEXT_NODE]
    text = u"".join(pieces)
    if do_strip:
        text = text.strip()
    return text
def xml_to_json(xml_doc):
    """Serialize an xml document to a json string whose single top-level
    key is the document element's name."""
    root = xml_doc.documentElement
    return json.dumps({root.nodeName: xml_node_to_json(root)})
def xml_node_to_json(xml_node):
    """Returns a json node generated from the given xml element.

    A pure text element becomes a scalar (coerced via json_value); an
    element with attributes or child elements becomes a dict, with
    attributes prefixed '@' and repeated child names collapsed into lists.
    """
    # single text child -> scalar value (plus attributes, if any)
    if((len(xml_node.childNodes) == 1) and
       (xml_node.childNodes[0].nodeType == xml_node.TEXT_NODE)):
        txt_node = xml_node.childNodes[0]
        txt_value = json_value(txt_node, txt_node.data)
        if(len(xml_node.attributes) == 0):
            return txt_value
        else:
            # attributes present: wrap the text under the "#text" key
            json_node = {}
            json_node[JSON_TEXT_KEY] = txt_value
            xml_attrs_to_json(xml_node, json_node)
            return json_node
    else:
        # element children: recurse, grouping same-named siblings
        json_node = {}
        for child_xml_node in xml_node.childNodes:
            new_child_json_node = xml_node_to_json(child_xml_node)
            cur_child_json_node = json_node.get(child_xml_node.nodeName, None)
            if(cur_child_json_node is None):
                cur_child_json_node = new_child_json_node
            else:
                # if we have more than one of the same type, turn the children
                # into a list
                if(not isinstance(cur_child_json_node, types.ListType)):
                    cur_child_json_node = [cur_child_json_node]
                cur_child_json_node.append(new_child_json_node)
            json_node[child_xml_node.nodeName] = cur_child_json_node
        xml_attrs_to_json(xml_node, json_node)
        return json_node
def xml_attrs_to_json(xml_node, json_node):
    """Copy every xml attribute onto json_node, prefixing each key with
    '@' and coercing the value via json_value."""
    attrs = xml_node.attributes
    for attr_name in attrs.keys():
        attr_node = attrs[attr_name]
        json_node[JSON_ATTR_PREFIX + attr_name] = json_value(
            attr_node, attr_node.nodeValue)
def json_value(xml_node, xml_node_value):
    """Coerce an xml node's string value to the json type implied by the
    property type recorded on the node's 'disp_meta_' marker, when one is
    present; otherwise return the value unchanged."""
    meta = getattr(xml_node, "disp_meta_", None)
    if meta is not None:
        json_type = PROPERTY_TYPE_TO_JSON_TYPE.get(
            get_instance_type_name(meta))
        if json_type:
            xml_node_value = json_type(xml_node_value)
    return xml_node_value
def json_to_xml(json_doc):
    """Build an xml document from a json document (file-like object).

    The json document must contain exactly one top-level key, which
    becomes the name of the xml document element.
    """
    parsed = json.load(json_doc)
    root_name = parsed.keys()[0]
    xml_doc = minidom.getDOMImplementation().createDocument(
        None, root_name, None)
    json_node_to_xml(xml_doc.documentElement, parsed[root_name])
    return xml_doc
def json_node_to_xml(xml_node, json_node):
    """Appends an xml node generated from the given json node to the given xml
    node.

    Scalars become text nodes; dict keys starting with '@' become
    attributes, the "#text" key becomes a text node, and every other key
    becomes one child element per value (lists fan out into siblings).
    """
    doc = xml_node.ownerDocument
    if(isinstance(json_node, (basestring, int, long, float, complex, bool))):
        # scalar leaf: emit as text content
        xml_node.appendChild(doc.createTextNode(unicode(json_node)))
    else:
        for json_node_name, json_node_value in json_node.iteritems():
            if(json_node_name[0] == JSON_ATTR_PREFIX):
                # "@name" -> attribute (strip the '@')
                xml_node.attributes[json_node_name[1:]] = json_node_value
            elif(json_node_name == JSON_TEXT_KEY):
                xml_node.appendChild(doc.createTextNode(json_node_value))
            else:
                # non-list values are wrapped so one element is emitted
                if(not isinstance(json_node_value, types.ListType)):
                    json_node_value = [json_node_value]
                for json_node_list_value in json_node_value:
                    child_node = append_child(xml_node, json_node_name)
                    json_node_to_xml(child_node, json_node_list_value)
def is_list_type(obj):
    """True when obj is a plain python list or tuple, False otherwise."""
    return isinstance(obj, types.ListType) or isinstance(obj, types.TupleType)
def model_hash_to_str(model_hash):
    """Base64-encode the given model hash int, dropping '=' padding so the
    result is safe for use as an etag token."""
    encoded = base64.b64encode(str(model_hash))
    return encoded.strip('=')
class PropertyHandler(object):
    """Base handler for Model properties which manages converting properties
    to and from xml.
    This implementation works for most properties which have simple to/from
    string conversions.
    """
    def __init__(self, property_name, property_type,
                 property_content_type=TEXT_CONTENT_TYPE):
        # property_name: name exposed over the wire; storage_name is only
        # set when the datastore name differs from the exposed name
        self.property_name = property_name
        self.storage_name = None
        if self.property_name != property_type.name:
            self.storage_name = property_type.name
        self.property_type = property_type
        self.property_content_type = property_content_type
        # most types can be parsed from stripped strings, but don't strip text
        # data
        self.strip_on_read = True
        if(isinstance(property_type, (db.StringProperty, db.TextProperty))):
            self.strip_on_read = False
    def get_query_field(self):
        """Returns the field name which should be used to query this
        property."""
        return self.property_type.name
    def can_query(self):
        """Returns True if this property can be used as a query filter, False
        otherwise."""
        return self.property_type.indexed
    def get_data_type(self):
        """Returns the type of data this property accepts."""
        return self.property_type.data_type
    def empty(self, value):
        """Tests a property value for empty in a manner specific to the
        property type."""
        return self.property_type.empty(value)
    def get_type_string(self):
        """Returns the type string describing this property."""
        return get_instance_type_name(self.property_type)
    def get_value(self, model):
        """Returns the value for this property from the given model
        instance."""
        return getattr(model, self.property_name)
    def get_value_as_string(self, model):
        """Returns the value for this property from the given model instance
        as a string (or EMPTY_VALUE if the value is empty as defined by this
        property type), used by the default write_xml_value() method."""
        value = self.get_value(model)
        if(self.empty(value)):
            return EMPTY_VALUE
        return self.value_to_string(value)
    def value_to_string(self, value):
        """Returns the given property value as a string, used by the default
        get_value_as_string() method."""
        return unicode(value)
    def value_from_xml_string(self, value):
        """Returns the value for this property from the given string value
        (may be None), used by the default read_xml_value() method."""
        if((value is None) or (self.strip_on_read and not value)):
            return None
        # coerce to the property's data type when necessary
        if(not isinstance(value, self.get_data_type())):
            value = self.get_data_type()(value)
        return value
    def value_from_raw_string(self, value):
        """Returns the value for this property from the given 'raw' string
        value (may be None), used by the default value_from_request() method.
        Default impl returns value_from_xml_string(value)."""
        if((value is not None) and self.strip_on_read):
            value = value.strip()
        return self.value_from_xml_string(value)
    def value_for_query(self, value):
        """Returns the value for this property from the given string value
        (may be None), for use in a query filter."""
        return self.value_from_xml_string(value)
    def write_xml_value(self, parent_el, prop_xml_name, model,
                        blob_info_format):
        """Returns the property value from the given model instance converted
        to an xml element and appended to the given parent element, or None
        when the value is empty."""
        value = self.get_value_as_string(model)
        if(value is EMPTY_VALUE):
            return None
        return append_child(parent_el, prop_xml_name, value,
                            self.property_type)
    def read_xml_value(self, props, prop_el):
        """Adds the value for this property to the given property dict
        converted from an xml element."""
        value = self.value_from_xml_string(get_node_text(prop_el.childNodes,
                                                         self.strip_on_read))
        props[self.property_name] = value
    def write_xsd_metadata(self, parent_el, prop_xml_name):
        """Returns the XML Schema element for this property type appended to
        the given parent element."""
        prop_el = xsd_append_element(parent_el, prop_xml_name,
                                     self.get_type_string(), XSD_NO_MIN,
                                     XSD_SINGLE_MAX)
        annot_el = self.write_xsd_metadata_annotation(prop_el)
        # unqueryable properties get the explicit "no filter" annotation
        if(not self.can_query()):
            xsd_append_nofilter(annot_el)
        return prop_el
    def write_xsd_metadata_annotation(self, prop_el):
        """Writes the additional property metadata annotation element and
        returns it."""
        annotation_el = append_child(prop_el, XSD_ANNOTATION_NAME)
        element_el = xsd_append_rest_metadata(annotation_el)
        prop_type = self.property_type
        element_el.attributes[REQUIRED_ATTR_NAME] = (
            str(prop_type.required).lower())
        element_el.attributes[INDEXED_ATTR_NAME] = (
            str(prop_type.indexed).lower())
        if prop_type.default is not None:
            element_el.attributes[DEFAULT_ATTR_NAME] = (
                self.value_to_string(prop_type.default))
        if prop_type.verbose_name is not None:
            element_el.attributes[VERBOSENAME_ATTR_NAME] = (
                unicode(prop_type.verbose_name))
        # only some property types (e.g. string) define "multiline"
        if hasattr(prop_type, "multiline"):
            element_el.attributes[MULTILINE_ATTR_NAME] = (
                str(prop_type.multiline).lower())
        if prop_type.choices:
            xsd_append_choices(element_el, self, prop_type.choices)
        return annotation_el
    def value_to_response(self, dispatcher, prop_xml_name, value, path):
        """Writes the output of a single property to the dispatcher's
        response."""
        dispatcher.set_response_content_type(self.property_content_type)
        dispatcher.response.out.write(value)
    def value_from_request(self, dispatcher, model, path):
        """Writes a single property from the dispatcher's response."""
        value = self.value_from_raw_string(
            dispatcher.request.body_file.getvalue())
        setattr(model, self.property_name, value)
class DateTimeHandler(PropertyHandler):
    """PropertyHandler for datetime/data/time property instances."""
    def __init__(self, property_name, property_type):
        super(DateTimeHandler, self).__init__(property_name, property_type)
        self.format_args = []
        # NOTE(review): DateProperty/TimeProperty are tested before
        # DateTimeProperty — presumably because they subclass it; confirm
        # before reordering these isinstance checks.
        if(isinstance(property_type, db.DateProperty)):
            self.dt_format = DATE_FORMAT
            self.dt_type = datetime.date
            self.allows_microseconds = False
        elif(isinstance(property_type, db.TimeProperty)):
            self.dt_format = TIME_FORMAT_NO_MS
            self.dt_type = datetime.time
            self.allows_microseconds = True
        elif(isinstance(property_type, db.DateTimeProperty)):
            self.dt_format = DATE_TIME_FORMAT_NO_MS
            self.dt_type = datetime
            self.allows_microseconds = True
            # full datetimes use "T" as the date/time separator
            self.format_args.append(DATE_TIME_SEP)
        else:
            raise ValueError(
                "unexpected property type %s for DateTimeHandler" %
                property_type)
    def value_to_string(self, value):
        """Returns the datetime/date/time value converted to the relevant iso
        string value."""
        value_str = value.isoformat(*self.format_args)
        # undo python's idiotic formatting irregularity: isoformat() omits
        # microseconds entirely when they are zero, so add them back
        if(self.allows_microseconds and not value.microsecond):
            value_str += ".000000"
        return unicode(value_str)
    def value_from_xml_string(self, value):
        """Returns the datetime/date/time parsed from the relevant iso string
        value, or None if the string is empty."""
        if(not value):
            return None
        return parse_date_time(value, self.dt_format, self.dt_type,
                               self.allows_microseconds)
class BooleanHandler(PropertyHandler):
    """PropertyHandler for boolean property instances."""
    def __init__(self, property_name, property_type):
        super(BooleanHandler, self).__init__(property_name, property_type)
    def value_to_string(self, value):
        """Serialize a boolean as the lowercase string 'true'/'false'."""
        return unicode(value).lower()
    def value_from_xml_string(self, value):
        """Parse a boolean: 'true' (any case) and '1' give True, any other
        non-empty string gives False, and empty/None gives None."""
        if not value:
            return None
        lowered = value.lower()
        return lowered in (TRUE_VALUE, TRUE_NUMERIC_VALUE)
class TextHandler(PropertyHandler):
    """PropertyHandler for (large) text property instances."""
    def __init__(self, property_name, property_type):
        super(TextHandler, self).__init__(property_name, property_type)
    def can_query(self):
        """Large text properties may never be used in query filters."""
        return False
class ByteStringHandler(PropertyHandler):
    """PropertyHandler for ByteString property instances; values travel
    base64-encoded in structured output and raw in binary bodies."""
    def __init__(self, property_name, property_type):
        super(ByteStringHandler, self).__init__(property_name, property_type,
                                                BINARY_CONTENT_TYPE)
    def value_to_string(self, value):
        """Base64-encode the raw bytes for structured output."""
        return base64.b64encode(str(value))
    def value_from_xml_string(self, value):
        """Decode a base64 string; empty input yields None."""
        if not value:
            return None
        return base64.b64decode(value)
    def value_from_raw_string(self, value):
        """Raw request bodies already hold the bytes verbatim."""
        return value
class BlobHandler(ByteStringHandler):
    """PropertyHandler for blob property instances."""
    def __init__(self, property_name, property_type):
        super(BlobHandler, self).__init__(property_name, property_type)
    def can_query(self):
        """Blob properties may never be used in query filters."""
        return False
class BaseReferenceHandler(PropertyHandler):
    """Common base for PropertyHandlers whose values are keys/references."""
    def __init__(self, property_name, property_type):
        super(BaseReferenceHandler, self).__init__(property_name,
                                                   property_type)
    def get_data_type(self):
        """Reference-style properties always trade in db.Key values."""
        return db.Key
    def get_value(self, model):
        """Return the key of the referenced model instance."""
        value = self.property_type.get_value_for_datastore(model)
        # dynamic (Expando) properties sometimes hand back the referenced
        # entity itself instead of its key -- normalize to the key
        if value and not isinstance(value, self.get_data_type()):
            value = value.key()
        return value
class ReferenceHandler(BaseReferenceHandler):
    """PropertyHandler for reference property instances."""
    def __init__(self, property_name, property_type):
        super(ReferenceHandler, self).__init__(property_name, property_type)
    def write_xsd_metadata_annotation(self, prop_el):
        """Write the standard annotation, then record the referenced model
        class name (or xs:any when untyped) on the rest:metadata element."""
        annot_el = super(ReferenceHandler, self).write_xsd_metadata_annotation(
            prop_el)
        # annotation -> xs:appinfo -> rest:metadata element
        md_el = annot_el.childNodes[0].childNodes[0]
        ref_class = self.property_type.reference_class
        ref_class_name = XSD_ANY_NAME
        if (ref_class is not None) and (ref_class is not db.Model):
            ref_class_name = ref_class.__name__
        md_el.attributes[REFERENCECLASS_ATTR_NAME] = ref_class_name
        return annot_el
class BlobReferenceHandler(BaseReferenceHandler):
    """PropertyHandler for blobstore reference property instances.

    Values are blobstore.BlobKey strings; the "/content" sub-path serves
    or uploads the actual blob bytes via the dispatcher.
    """
    def __init__(self, property_name, property_type):
        super(BlobReferenceHandler, self).__init__(property_name,
                                                   property_type)
    def get_data_type(self):
        """Returns the blobstore.BlobKey type."""
        return blobstore.BlobKey
    def write_xml_value(self, parent_el, prop_xml_name, model,
                        blob_info_format):
        """Returns an xml element containing the blobstore.BlobKey and
        optionally containing the BlobInfo properties as attributes."""
        blob_key = self.get_value(model)
        if(self.empty(blob_key)):
            return None
        blob_el = append_child(parent_el, prop_xml_name,
                               self.value_to_string(blob_key))
        if(blob_info_format == QUERY_BLOBINFO_TYPE_INFO):
            # include all available blobinfo properties
            blob_info = blobstore.BlobInfo.get(blob_key)
            if blob_info:
                for prop_xml_name, prop_handler in (
                        BLOBINFO_PROP_HANDLERS.iteritems()):
                    attr_value = prop_handler.get_value_as_string(blob_info)
                    if(attr_value is not EMPTY_VALUE):
                        blob_el.attributes[prop_xml_name] = attr_value
                        # tag the attribute node so json output can coerce it
                        blob_el.attributes[prop_xml_name].disp_meta_ = (
                            prop_handler.property_type)
        return blob_el
    def write_xsd_metadata(self, parent_el, prop_xml_name):
        """Returns the XML Schema element for this property type appended to
        the given parent element. Adds the BlobInfo complex type if
        necessary."""
        # add simple element for this property of type BlobInfo
        prop_el = super(BlobReferenceHandler, self).write_xsd_metadata(
            parent_el, prop_xml_name)
        # add the BlobInfo type definition if not already added
        schema_el = parent_el.ownerDocument.documentElement
        has_blob_info = False
        for type_el in schema_el.childNodes:
            if(type_el.attributes[NAME_ATTR_NAME].nodeValue ==
               BLOBINFO_TYPE_NAME):
                has_blob_info = True
                break
        # we need to add the BlobInfo type def
        if not has_blob_info:
            blob_type_el = append_child(schema_el, XSD_COMPLEXTYPE_NAME)
            blob_type_el.attributes[NAME_ATTR_NAME] = BLOBINFO_TYPE_NAME
            ext_el = append_child(append_child(blob_type_el,
                                               XSD_SIMPLECONTENT_NAME),
                                  XSD_EXTENSION_NAME)
            ext_el.attributes[BASE_ATTR_NAME] = XSD_NORMAL_STR
            for prop_xml_name, prop_handler in (
                    BLOBINFO_PROP_HANDLERS.iteritems()):
                xsd_append_attribute(ext_el, prop_xml_name,
                                     prop_handler.get_type_string())
        return prop_el
    def value_to_response(self, dispatcher, prop_xml_name, value, path):
        """Writes the output a blobkey property (or the blob contents) to the
        dispatcher's response."""
        if((len(path) > 0) and (path.pop(0) == CONTENT_PATH)):
            # ".../content" sub-path: stream the blob itself
            blob_info = None
            if value:
                blob_info = blobstore.BlobInfo.get(value)
            dispatcher.serve_blob(blob_info)
            return
        # just return blobinfo key
        super(BlobReferenceHandler, self).value_to_response(
            dispatcher, prop_xml_name, value, path)
    def value_from_request(self, dispatcher, model, path):
        """Writes a single property from the dispatcher's response."""
        if(len(path) == 0):
            # just return blobinfo key
            super(BlobReferenceHandler, self).value_from_request(dispatcher,
                                                                 model, path)
            return
        # only the ".../content" sub-path may be written
        if(path.pop(0) != CONTENT_PATH):
            raise DispatcherException(404)
        dispatcher.upload_blob(path, model, self.property_name)
class KeyHandler(BaseReferenceHandler):
    """PropertyHandler for the synthetic primary 'key' of a Model."""
    def __init__(self):
        super(KeyHandler, self).__init__(KEY_PROPERTY_NAME, KEY_PROPERTY_TYPE)
    def get_query_field(self):
        """Keys are filtered through the special '__key__' query field."""
        return KEY_QUERY_FIELD
    def get_value(self, model):
        """Return the model's key, or EMPTY_VALUE for an unsaved model
        (which has no key yet)."""
        if model.is_saved():
            return model.key()
        return EMPTY_VALUE
    def get_type_string(self):
        """Keys report the synthetic 'KeyProperty' type name."""
        return KEY_PROPERTY_TYPE_NAME
class ListHandler(PropertyHandler):
"""PropertyHandler for lists property instances."""
def __init__(self, property_name, property_type):
super(ListHandler, self).__init__(property_name, property_type)
self.sub_handler = get_property_handler(
ITEM_EL_NAME,
DATA_TYPE_TO_PROPERTY_TYPE[
get_type_name(property_type.item_type)]())
def get_type_string(self):
"""Returns the type string 'ListProperty:' + <sub_type_string>."""
return (super(ListHandler, self).get_type_string() +
DATA_TYPE_SEPARATOR + self.sub_handler.get_type_string())
def can_query(self):
"""Can query is based on the list element type."""
return self.sub_handler.can_query()
def value_for_query(self, value):
"""Returns the value for a query filter based on the list element
type."""
return self.sub_handler.value_from_xml_string(value)
def write_xml_value(self, parent_el, prop_xml_name, model,
blob_info_format):
"""Returns a list element containing value elements for the property
from the given model instance appended to the given parent element."""
values = self.get_value(model)
if(not values):
return None
list_el = append_child(parent_el, prop_xml_name)
for value in values:
append_child(list_el, ITEM_EL_NAME,
self.sub_handler.value_to_string(value),
self.sub_handler.property_type)
return list_el
def read_xml_value(self, props, prop_el):
"""Adds a list containing the property values to the given property
dict converted from an xml list element."""
values = []
sub_props = {}
for item_node in prop_el.childNodes:
if((item_node.nodeType == item_node.ELEMENT_NODE) and
(str(item_node.nodeName) == ITEM_EL_NAME)):
self.sub_handler.read_xml_value(sub_props, item_node)
values.append(sub_props.pop(ITEM_EL_NAME))
props[self.property_name] = values
def write_xsd_metadata(self, parent_el, prop_xml_name):
"""Returns the XML Schema list element for this property type
appended to the given parent element."""
list_el = super(ListHandler, self).write_xsd_metadata(parent_el,
prop_xml_name)
seq_el = xsd_append_sequence(list_el)
xsd_append_element(seq_el, ITEM_EL_NAME,
self.sub_handler.get_type_string(), XSD_NO_MIN,
XSD_NO_MAX)
return list_el
def write_xsd_metadata_annotation(self, prop_el):
"""Writes the list property metadata annotation element and returns
it."""
annotation_el = append_child(prop_el, XSD_ANNOTATION_NAME)
element_el = xsd_append_rest_metadata(annotation_el)
prop_type = self.property_type
element_el.attributes[REQUIRED_ATTR_NAME] = (
str(prop_type.required).lower())
element_el.attributes[INDEXED_ATTR_NAME] = (
str(prop_type.indexed).lower())
if prop_type.verbose_name is not None:
element_el.attributes[VERBOSENAME_ATTR_NAME] = (
unicode(prop_type.verbose_name))
# list properties always have at least an empty list as default
default_el = append_child(element_el, REST_MD_DEFAULT_NAME)
if prop_type.default is not None:
for val in prop_type.default:
append_child(default_el, ITEM_EL_NAME,
self.sub_handler.value_to_string(val))
return annotation_el
def value_to_response(self, dispatcher, prop_xml_name, value, path):
    """Writes this list (or, when the path selects one, a single element
    of this list) to the dispatcher's response."""
    if path:
        # path addresses one element: pop its index and let the item
        # handler write just that value
        element = value[int(path.pop(0))]
        self.sub_handler.value_to_response(
            dispatcher, ITEM_EL_NAME, element, path)
        return
    # otherwise serialize the entire list as xml/json
    doc = None
    try:
        doc = minidom.getDOMImplementation().createDocument(
            None, prop_xml_name, None)
        if value:
            root = doc.documentElement
            for element in value:
                append_child(root, ITEM_EL_NAME,
                             self.sub_handler.value_to_string(element),
                             self.sub_handler.property_type)
        dispatcher.write_output(dispatcher.doc_to_output(doc))
    finally:
        # release the DOM tree's cyclic references
        if doc:
            doc.unlink()
def value_from_request(self, dispatcher, model, path):
    """Sets this list property (or, when the path selects one, a single
    element of the list) from the dispatcher's request."""
    if path:
        # path addresses one element: parse the raw body as a single item
        item = self.sub_handler.value_from_raw_string(
            dispatcher.request.body_file.getvalue())
        current = getattr(model, self.property_name)
        index = int(path.pop(0))
        if index == len(current):
            # an index one past the end appends a new element
            current.append(item)
        else:
            current[index] = item
        return
    # otherwise replace the entire list from the xml/json body
    doc = dispatcher.input_to_xml()
    try:
        parsed = {}
        self.read_xml_value(parsed, doc.documentElement)
        setattr(model, self.property_name, parsed[self.property_name])
    finally:
        doc.unlink()
# Dynamic Property: "values for query"
# Regexes used to coerce basic Python types from query string values.
DYN_FLOAT_RE = re.compile(r"^[+-]?[0-9]+[.][0-9]+$")
DYN_INTEGER_RE = re.compile(r"^[+-]?[0-9]+$")
# Date-time and Time:
#   datetime == "2012-01-31T22:00:01"
#   date     == "2012-01-31"
#   time     == "22:00:01"
DYN_DATETIME_RE = re.compile(r"^\d{4}[-]\d{2}[-]\d{2}T\d{2}[:]\d{2}[:]\d{2}$")
DYN_DATE_RE = re.compile(r"^\d{4}[-]\d{2}[-]\d{2}$")
DYN_TIME_RE = re.compile(r"^\d{2}[:]\d{2}[:]\d{2}$")
# TODO: complex, ...
class DynamicPropertyHandler(object):
    """PropertyHandler for dynamic properties on Expando models."""

    def __init__(self, property_name):
        # dynamic properties have no static storage-name mapping
        self.property_name = property_name
        self.storage_name = None

    def get_query_field(self):
        """Returns the field name which should be used to query this
        property."""
        return self.property_name

    def can_query(self):
        """Dynamic properties can be used in query filters.

        However, only fields with the datatype of the query value would
        match.
        """
        return True

    def write_xml_value(self, parent_el, prop_xml_name, model,
                        blob_info_format):
        """Returns the property value from the given model instance converted
        to an xml element (with a type attribute) of the appropriate type and
        appended to the given parent element."""
        value = getattr(model, self.property_name)
        # pick a handler based on the runtime type of the current value
        prop_handler = self.get_handler(None, value)
        prop_el = prop_handler.write_xml_value(parent_el, prop_xml_name, model,
                                               blob_info_format)
        if prop_el:
            # record the type so the value can be round-tripped on read
            prop_el.attributes[TYPE_ATTR_NAME] = prop_handler.get_type_string()
        return prop_el

    def read_xml_value(self, props, prop_el):
        """Adds the value for this property to the given property dict
        converted from an xml element, either as a StringProperty value if no
        type attribute exists or as the type given in a type attribute."""
        if(prop_el.hasAttribute(TYPE_ATTR_NAME)):
            prop_type = str(prop_el.attributes[TYPE_ATTR_NAME].value)
            self.get_handler(prop_type, None).read_xml_value(props, prop_el)
            return
        # no type attribute: fall back to the element's raw text
        props[self.property_name] = get_node_text(prop_el.childNodes)

    def value_for_query(self, value):
        """Returns the value for this property from the given string value
        (may be None), for use in a query filter.  Coerces the string value
        to a basic Python type, and lets the Property subclass validate it
        further."""
        if value is None:
            query_value = None
        elif (value[0] == "'" or value[0] == '"') and value[0] == value[-1]:
            # Quoted values do not get coerced, e.g. "-4".
            query_value = unicode(value[1:-1])
        # Python type coercions:
        elif DYN_FLOAT_RE.match(value.strip()):
            query_value = float(value)
        elif DYN_INTEGER_RE.match(value.strip()):
            query_value = int(value)
        elif DYN_DATETIME_RE.match(value.strip()):
            query_value = parse_date_time(value.strip(),
                                          DATE_TIME_FORMAT_NO_MS,
                                          datetime, True)
        elif DYN_TIME_RE.match(value.strip()):
            query_value = parse_date_time(value.strip(), TIME_FORMAT_NO_MS,
                                          datetime.time, True)
        elif DYN_DATE_RE.match(value.strip()):
            query_value = parse_date_time(value.strip(), DATE_FORMAT,
                                          datetime.date, True)
        else:
            # keep as a string.
            query_value = unicode(value)
        return query_value

    def get_handler(self, property_type, value):
        """Returns the relevant PropertyHandler based on the given
        property_type string or property value (exactly one of the two is
        expected to be provided)."""
        prop_args = []
        sub_handler = None
        if(value is not None):
            # derive the db property type from the value's python type
            property_type = DATA_TYPE_TO_PROPERTY_TYPE[
                get_instance_type_name(value)]
            if(property_type is db.ListProperty):
                # list properties also need the type of their items
                prop_args.append(type(value[0]))
        if(isinstance(property_type, basestring)):
            if DATA_TYPE_SEPARATOR in property_type:
                # e.g. "ListProperty:StringProperty" -> list w/ item type
                property_type, sub_property_type = property_type.split(
                    DATA_TYPE_SEPARATOR, 1)
                sub_handler = get_property_handler(ITEM_EL_NAME, getattr(
                    db, sub_property_type)())
                prop_args.append(sub_handler.get_data_type())
            property_type = getattr(db, property_type)
        property_type = property_type(*prop_args)
        property_type.name = self.property_name
        return get_property_handler(self.property_name, property_type)
def get_property_handler(property_name, property_type):
    """Returns a PropertyHandler instance with the given name appropriate
    for the given property type, falling back to the generic
    PropertyHandler."""
    # ordered dispatch: more specific db property classes are listed before
    # any base classes they may share, so order must be preserved
    handler_dispatch = (
        ((db.DateTimeProperty, db.TimeProperty, db.DateProperty),
         DateTimeHandler),
        (db.BooleanProperty, BooleanHandler),
        (db.ReferenceProperty, ReferenceHandler),
        (db.ByteStringProperty, ByteStringHandler),
        (db.BlobProperty, BlobHandler),
        (db.TextProperty, TextHandler),
        (db.ListProperty, ListHandler),
        (blobstore.BlobReferenceProperty, BlobReferenceHandler),
    )
    for prop_classes, handler_class in handler_dispatch:
        if isinstance(property_type, prop_classes):
            return handler_class(property_name, property_type)
    return PropertyHandler(property_name, property_type)
class ModelQuery(object):
    """Utility class for holding parameters for a model query."""

    def __init__(self):
        # paging controls (offset- or cursor-based)
        self.fetch_page_size = MAX_FETCH_PAGE_SIZE
        self.fetch_offset = None
        self.fetch_cursor = None
        # ordering field and direction index
        self.ordering = None
        self.order_type_idx = QUERY_ORDER_ASC_IDX
        # gql expression and its positional parameters
        self.query_expr = None
        self.query_params = []
        # opaque token handed back to the caller for fetching the next page
        self.next_fetch_offset = ""

    def parse(self, dispatcher, model_handler):
        """Parses the current request into a query."""
        for arg in dispatcher.request.arguments():
            if(arg == QUERY_OFFSET_PARAM):
                query_offset = str(dispatcher.request.get(QUERY_OFFSET_PARAM))
                if query_offset[0:2] == QUERY_CURSOR_PREFIX:
                    # cursor-style offset token
                    self.fetch_cursor = query_offset[2:]
                else:
                    # plain numeric offset
                    self.fetch_offset = int(query_offset)
                continue
            if(arg == QUERY_PAGE_SIZE_PARAM):
                self.fetch_page_size = int(
                    dispatcher.request.get(QUERY_PAGE_SIZE_PARAM))
                continue
            if(arg == QUERY_ORDERING_PARAM):
                ordering_field = dispatcher.request.get(QUERY_ORDERING_PARAM)
                if(ordering_field[0] == "-"):
                    # leading "-" requests descending order
                    ordering_field = ordering_field[1:]
                    self.order_type_idx = QUERY_ORDER_DSC_IDX
                prop_handler = model_handler.get_property_handler(
                    ordering_field)
                if(not prop_handler.can_query()):
                    raise KeyError("Can not order on property %s" %
                                   ordering_field)
                self.ordering = prop_handler.get_query_field()
                continue
            if(arg in EXTRA_QUERY_PARAMS):
                #ignore
                continue
            # remaining args are query filter terms; build a gql
            # sub-expression for each recognized one
            match = QUERY_TERM_PATTERN.match(arg)
            if(match is None):
                logging.warning("ignoring unexpected query param %s", arg)
                continue
            query_type = match.group(1)
            query_field = match.group(2)
            query_sub_expr = QUERY_EXPRS[query_type]
            query_values = dispatcher.request.get_all(arg)
            if(query_type == QUERY_LIST_TYPE):
                query_values = [v.split(",") for v in query_values]
            query_field, query_values = model_handler.read_query_values(
                query_field, query_values)
            for value in query_values:
                self.query_params.append(value)
            # positional parameter index is the current param count
            query_sub_expr = query_sub_expr % (query_field,
                                               len(self.query_params))
            if(not self.query_expr):
                self.query_expr = QUERY_PREFIX + query_sub_expr
            else:
                self.query_expr += QUERY_JOIN + query_sub_expr
        # clamp the page size to [1, MAX_FETCH_PAGE_SIZE], also honoring the
        # dispatcher's configured limit
        self.fetch_page_size = max(min(dispatcher.fetch_page_size,
                                       self.fetch_page_size,
                                       MAX_FETCH_PAGE_SIZE), 1)
class Lazy(object):
    """Descriptor enabling lazy initialization of decorated properties.

    The wrapped function is invoked on first attribute access; its result
    is then stored on the instance under the function's own name.  Since
    this is a non-data descriptor, the stored instance attribute shadows
    the descriptor for all subsequent accesses, so the function runs at
    most once per instance.
    """

    def __init__(self, calculate_function):
        self._calculate = calculate_function

    def __get__(self, obj, _=None):
        if obj is None:
            # class-level access returns the descriptor itself
            return self
        value = self._calculate(obj)
        # cache the computed value on the instance; use __name__ (valid on
        # both Python 2 and 3) instead of the legacy Python-2-only
        # func_name alias
        setattr(obj, self._calculate.__name__, value)
        return value
class ModelHandler(object):
    """Handler for a Model (or Expando) type which manages converting
    instances to and from xml."""

    def __init__(self, model_name, model_type, model_methods):
        self.model_name = model_name
        self.model_type = model_type
        # the "key" pseudo-property is handled separately from real props
        self.key_handler = KeyHandler()
        # http methods (e.g. GET/POST/PUT/DELETE) supported for this model
        self.model_methods = model_methods

    @Lazy
    def property_handlers(self):
        """Lazy initializer for the property_handlers dict (xml property
        name -> PropertyHandler for the model's static properties)."""
        prop_handlers = {}
        for prop_name, prop_type in self.model_type.properties().iteritems():
            prop_handler = get_property_handler(prop_name, prop_type)
            prop_xml_name = convert_to_valid_xml_name(
                prop_handler.property_name)
            prop_handlers[prop_xml_name] = prop_handler
        return prop_handlers

    def is_dynamic(self):
        """Returns True if this Model type supports dynamic properties (is a
        subclass of Expando), False otherwise."""
        return issubclass(self.model_type, db.Expando)

    def get(self, key):
        """Returns the model instance with the given key."""
        model = self.model_type.get(key)
        if model and Dispatcher.enable_etags:
            # compute pristine hash before any modifications are made
            self.hash_model(model)
        return model

    @classmethod
    def put(cls, model):
        """Saves a new/updated model instance."""
        model.put()
        if Dispatcher.enable_etags:
            # compute the new etag for the modified instance
            cls.hash_model(model, True)

    def create(self, props):
        """Returns a newly created model instance with the given properties
        (as a keyword dict)."""
        # convert property names to storage names, since the Model
        # constructor seems to expect properties using the storage name
        # (blech!)
        for prop_handler in self.property_handlers.itervalues():
            if(prop_handler.storage_name and
               (prop_handler.property_name in props)):
                props[prop_handler.storage_name] = props.pop(
                    prop_handler.property_name)
        return self.model_type(**props)

    def get_all(self, model_query):
        """Returns all model instances of this type matching the given
        query."""
        if model_query.fetch_offset is not None:
            # if possible, attempt to fetch more than we really want so that
            # we can determine if we have more results.  this trick is only
            # possible if fetching w/ offsets
            real_fetch_page_size = model_query.fetch_page_size
            if(model_query.fetch_page_size < MAX_FETCH_PAGE_SIZE):
                model_query.fetch_page_size += 1
        if(model_query.query_expr is None):
            # no filters: plain "all" query with optional ordering
            query = self.model_type.all()
            if(model_query.ordering):
                query.order(QUERY_ORDER_PREFIXES[model_query.order_type_idx] +
                            model_query.ordering)
        else:
            # filtered query: append ordering to the gql expression
            if(model_query.ordering):
                model_query.query_expr += (
                    QUERY_ORDERBY + model_query.ordering +
                    QUERY_ORDER_SUFFIXES[model_query.order_type_idx])
            query = self.model_type.gql(model_query.query_expr,
                                        *model_query.query_params)
        if model_query.fetch_offset is None:
            # cursor-based fetch
            if model_query.fetch_cursor:
                query.with_cursor(model_query.fetch_cursor)
            models = query.fetch(model_query.fetch_page_size)
            # a full page means there may be more results
            if(len(models) == model_query.fetch_page_size):
                try:
                    model_query.next_fetch_offset = (QUERY_CURSOR_PREFIX +
                                                     query.cursor())
                except AssertionError:
                    # some queries don't allow cursors, fallback to offsets
                    model_query.next_fetch_offset = str(
                        model_query.fetch_page_size)
        else:
            # offset-based fetch (page size was bumped by one above)
            models = query.fetch(model_query.fetch_page_size,
                                 model_query.fetch_offset)
            if(len(models) >= model_query.fetch_page_size):
                model_query.next_fetch_offset = str(real_fetch_page_size +
                                                    model_query.fetch_offset)
            # trim list to the actual size we want
            if(len(models) > real_fetch_page_size):
                models = models[0:real_fetch_page_size]
        return models

    def delete_all(self, model_query):
        """Deletes all model instances of this type matching the given
        query."""
        if(model_query.query_expr is None):
            query = self.model_type.all()
        else:
            query = self.model_type.gql(model_query.query_expr,
                                        *model_query.query_params)
        db.delete(query)

    def get_property_handler(self, prop_name):
        """Returns the relevant property handler for the given property
        name."""
        prop_name = str(prop_name)
        if(prop_name == KEY_PROPERTY_NAME):
            return self.key_handler
        elif(prop_name in self.property_handlers):
            return self.property_handlers[prop_name]
        elif(self.is_dynamic()):
            # unknown names are allowed as dynamic props on Expando models
            return DynamicPropertyHandler(prop_name)
        else:
            raise KeyError("Unknown property %s" % prop_name)

    def read_xml_value(self, model_el):
        """Returns a property dictionary for this Model from the given model
        element."""
        props = {}
        for prop_node in model_el.childNodes:
            if(prop_node.nodeType != prop_node.ELEMENT_NODE):
                continue
            prop_xml_name = prop_node.nodeName
            if prop_xml_name[0] == "_":
                # ignore incoming properties which start with underscore,
                # since this is an invalid property name anyway
                continue
            self.get_property_handler(prop_xml_name).read_xml_value(
                props, prop_node)
        return props

    def read_xml_property(self, prop_el, props, prop_handler):
        """Reads a property from a property element."""
        prop_handler.read_xml_value(props, prop_el)

    def read_query_values(self, prop_query_name, prop_query_values):
        """Returns a tuple of (query_field, query_values) from the given
        query property name and value list."""
        prop_handler = self.get_property_handler(prop_query_name)
        if(not prop_handler.can_query()):
            raise KeyError("Can not filter on property %s" % prop_query_name)
        return (prop_handler.get_query_field(),
                [self.read_query_value(prop_handler, v)
                 for v in prop_query_values])

    def read_query_value(self, prop_handler, prop_query_value):
        """Returns a query value from the given query property handler and
        value (may be a list)."""
        if is_list_type(prop_query_value):
            return [prop_handler.value_for_query(v) for v in prop_query_value]
        else:
            return prop_handler.value_for_query(prop_query_value)

    def write_xml_value(self, model_el, model, blob_info_format,
                        include_props):
        """Appends the properties of the given instance as xml elements to
        the given model element (optionally restricted to include_props)."""
        # if namespaces are readable externally, set relevant attr
        if READ_EXT_NS in Dispatcher.external_namespaces:
            model_ns = None
            if model.is_saved():
                model_ns = model.key().namespace()
            if model_ns:
                model_el.attributes[MODELNS_ATTR_NAME] = model_ns
        # add etag attr if enabled
        if Dispatcher.enable_etags:
            model_el.attributes[ETAG_ATTR_NAME] = model_hash_to_str(
                self.hash_model(model))
        # write key property first
        if((include_props is None) or (KEY_PROPERTY_NAME in include_props)):
            self.write_xml_property(model_el, model, KEY_PROPERTY_NAME,
                                    self.key_handler, blob_info_format)
        # write static properties next
        for prop_xml_name, prop_handler in self.property_handlers.iteritems():
            if((include_props is None) or (prop_xml_name in include_props)):
                self.write_xml_property(model_el, model, prop_xml_name,
                                        prop_handler, blob_info_format)
        # write dynamic properties last
        for prop_name in model.dynamic_properties():
            prop_xml_name = convert_to_valid_xml_name(prop_name)
            if((include_props is None) or (prop_xml_name in include_props)):
                self.write_xml_property(model_el, model, prop_xml_name,
                                        DynamicPropertyHandler(prop_name),
                                        blob_info_format)

    def write_xml_property(self, model_el, model, prop_xml_name, prop_handler,
                           blob_info_format):
        """Writes a property as a property element."""
        prop_handler.write_xml_value(model_el, prop_xml_name, model,
                                     blob_info_format)

    def write_xsd_metadata(self, type_el, model_xml_name):
        """Appends the XML Schema elements of the property types of this
        model type to the given parent element."""
        top_el = append_child(type_el, XSD_ELEMENT_NAME)
        top_el.attributes[NAME_ATTR_NAME] = model_xml_name
        if Dispatcher.include_docstring_in_schema and self.model_type.__doc__:
            annotation_el = append_child(top_el, XSD_ANNOTATION_NAME)
            append_child(annotation_el, XSD_DOCUMENTATION_NAME,
                         self.model_type.__doc__)
        seq_el = xsd_append_sequence(top_el)
        self.key_handler.write_xsd_metadata(seq_el, KEY_PROPERTY_NAME)
        for prop_xml_name, prop_handler in self.property_handlers.iteritems():
            prop_handler.write_xsd_metadata(seq_el, prop_xml_name)
        if(self.is_dynamic()):
            # Expando models can carry arbitrary extra elements
            any_el = append_child(seq_el, XSD_ANY_NAME)
            any_el.attributes[XSD_ATTR_NAMESPACE] = XSD_ANY_NAMESPACE
            any_el.attributes[XSD_ATTR_PROCESSCONTENTS] = XSD_LAX_CONTENTS
            any_el.attributes[XSD_ATTR_MINOCCURS] = XSD_NO_MIN
            any_el.attributes[XSD_ATTR_MAXOCCURS] = XSD_NO_MAX
        if READ_EXT_NS in Dispatcher.external_namespaces:
            xsd_append_attribute(seq_el.parentNode, MODELNS_ATTR_NAME, None,
                                 XSD_NORMAL_STR)
        if Dispatcher.enable_etags:
            xsd_append_attribute(seq_el.parentNode, ETAG_ATTR_NAME, None,
                                 XSD_NORMAL_STR)

    @classmethod
    def hash_model(cls, model, force_rehash=False):
        """Returns a hash of the model, suitable for an etag value."""
        # after computing the hash, we store it on the model for future
        # retrieval (useful if the model is later modified)
        if((not hasattr(model, "model_hash_")) or force_rehash):
            model.model_hash_ = cls.hash_model_impl(model)
        return model.model_hash_

    @classmethod
    def hash_model_impl(cls, model):
        """Returns a hash of the model, suitable for an etag value."""
        # if entity versions are available, use them
        entity_version = metadata.get_entity_group_version(model)
        if entity_version:
            return entity_version
        # otherwise, create hash of all model props (and key)
        model_hash = 0
        for prop_key, prop_value in db.to_dict(model).iteritems():
            prop_hash = hash(prop_key)
            if is_list_type(prop_value):
                # order-sensitive combination of the list elements
                for idx in range(0, len(prop_value)):
                    prop_hash += (idx * hash(prop_value[idx]))
            else:
                prop_hash += hash(prop_value)
            model_hash = model_hash ^ prop_hash
        model_hash = model_hash ^ (hash(KEY_PROPERTY_NAME) + hash(model.key()))
        return model_hash
# static collection of property handlers for BlobInfo types (needed because
# BlobInfo.properties() is a set, not a dict, so handlers cannot be derived
# the way they are for normal Models)
BLOBINFO_PROP_HANDLERS = {
    "content_type": PropertyHandler("content_type", db.StringProperty()),
    "creation": DateTimeHandler("creation", db.DateTimeProperty()),
    "filename": PropertyHandler("filename", db.StringProperty()),
    "size": PropertyHandler("size", db.IntegerProperty())}
class DispatcherException(Exception):
    """Exception carrying an http error code to be returned from the
    current request.

    An error_code of None means the thrower has already configured the
    response appropriately before raising.
    """

    def __init__(self, error_code=None):
        # record the code first, then run the standard Exception setup
        self.error_code = error_code
        super(DispatcherException, self).__init__()
class Authenticator(object):
    """Hook for authenticating REST API calls (default allows any
    caller)."""

    def authenticate(self, dispatcher):
        """Authenticates the current request for the given dispatcher.

        Returns normally when authentication succeeds; otherwise raises a
        DispatcherException with an appropriate error code, e.g. 403 or 404
        (see the Dispatcher.forbidden() and Dispatcher.not_found() methods).

        Note that an error_code of None is handled specially by the
        Dispatcher (the response is not modified), so an implementation may,
        for example, issue an HTTP authentication challenge by configuring
        the response itself and then raising a DispatcherException with no
        code.

        Args:
          dispatcher: the dispatcher for the request to be authenticated
        """
        # default implementation: everyone is authenticated
        return None
class Authorizer(object):
    """Hook for authorizing access to the data served by the REST API.

    In general, authorization failures in can_* methods should raise a
    DispatcherException with an appropriate error code, while filter_*
    methods should simply remove any unauthorized data.  The default
    implementation allows all access.
    """

    def can_read_metadata(self, dispatcher, model_name):
        """Returns normally if the metadata of the model named model_name is
        visible to the user behind the given dispatcher's current request,
        otherwise raises a DispatcherException with an appropriate error
        code (see the Dispatcher.forbidden() method).

        Args:
          dispatcher: the dispatcher for the request to be authorized
          model_name: the name of the model whose metadata was requested
        """
        return None

    def filter_read_metadata(self, dispatcher, model_names):
        """Returns the subset of the given model_names whose metadata is
        visible to the user behind the given dispatcher's current request.

        Args:
          dispatcher: the dispatcher for the request to be authorized
          model_names: the names of models whose metadata was requested
        """
        return model_names

    def can_read(self, dispatcher, model):
        """Returns normally if the given model can be read by the user
        behind the given dispatcher's current request, otherwise raises a
        DispatcherException with an appropriate error code (see the
        Dispatcher.forbidden() method).

        Args:
          dispatcher: the dispatcher for the request to be authorized
          model: the model to be read
        """
        return None

    def filter_read(self, dispatcher, models):
        """Returns the subset of the given models which can be read by the
        user behind the given dispatcher's current request.  Note that the
        check_query() method can also be used to filter the models retrieved
        from a query.

        Args:
          dispatcher: the dispatcher for the request to be authorized
          models: the models to be read
        """
        return models

    def check_query(self, dispatcher, query_expr, query_params):
        """Returns the given query expression, verified/modified so that it
        is valid for the user behind the given dispatcher's current request.

        For instance, if every model has an 'owner' field, an implementation
        of this method could be:

          query_params.append(authenticated_user)
          if(not query_expr):
              query_expr = 'WHERE owner = :%d' % (len(query_params))
          else:
              query_expr += ' AND owner = :%d' % (len(query_params))
          return query_expr

        Args:
          dispatcher: the dispatcher for the request to be authorized
          query_expr: currently defined query expression, like
                      'WHERE foo = :1 AND blah = :2', or None for
                      'query all'
          query_params: the list of positional query parameters associated
                        with the given query_expr
        """
        return query_expr

    def can_write(self, dispatcher, model, is_replace):
        """Returns normally if the given model can be modified by the user
        behind the given dispatcher's current request, otherwise raises a
        DispatcherException with an appropriate error code (see the
        Dispatcher.forbidden() method).

        Args:
          dispatcher: the dispatcher for the request to be authorized
          model: the model to be modified
          is_replace: True if this is a full update (PUT), False otherwise
                      (POST)
        """
        return None

    def filter_write(self, dispatcher, models, is_replace):
        """Returns the subset of the given models which can be modified by
        the user behind the given dispatcher's current request.

        Args:
          dispatcher: the dispatcher for the request to be authorized
          models: the models to be modified
          is_replace: True if this is a full update (PUT), False otherwise
                      (POST)
        """
        return models

    def can_write_blobinfo(self, dispatcher, model, property_name):
        """Returns normally if a blob for the given property_name on the
        given model can be uploaded by the user behind the given
        dispatcher's current request, otherwise raises a
        DispatcherException with an appropriate error code (see the
        Dispatcher.forbidden() method).

        This call is a pre-check made _before_ the blob is uploaded (a
        normal can_write() check still happens after the upload succeeds).

        Args:
          dispatcher: the dispatcher for the request to be authorized
          model: the model to be (eventually) modified
          property_name: the name of the BlobInfo to be uploaded
        """
        return None

    def can_delete(self, dispatcher, model_type, model_key):
        """Returns normally if the model with the given type and key can be
        deleted by the user behind the given dispatcher's current request,
        otherwise raises a DispatcherException with an appropriate error
        code (see the Dispatcher.forbidden() method).

        Args:
          dispatcher: the dispatcher for the request to be authorized
          model_type: the class of the model to be deleted
          model_key: the key of the model to be deleted
        """
        return None

    def check_delete_query(self, dispatcher, query_expr, query_params):
        """Returns the given delete query expression, verified/modified so
        that it is valid for the user behind the given dispatcher's current
        request.  See check_query() for example usage.

        Args:
          dispatcher: the dispatcher for the request to be authorized
          query_expr: currently defined delete query expression, like
                      'WHERE foo = :1 AND blah = :2', or None for
                      'query all'
          query_params: the list of positional query parameters associated
                        with the given query_expr
        """
        return query_expr
class CachedResponse(object):
    """Simple class used to cache query responses.

    Captures the response body, content type, etag (when enabled), and the
    request's "accept" header so that an equivalent later request can be
    answered from cache.
    """

    def __init__(self, dispatcher):
        request = dispatcher.request
        response = dispatcher.response
        # webapp and webapp2 expose the buffered response body differently
        if not COMPAT_WEBAPP2:
            self.out = response.out.getvalue()
        else:
            self.out = response.out.body
        self.content_type = response.disp_out_type_
        self.accept = unicode(request.accept)
        # always define etag so write_output() can test it safely even when
        # etag support is disabled (previously this attribute was only set
        # when enable_etags was True, causing an AttributeError later)
        self.etag = None
        if Dispatcher.enable_etags:
            self.etag = response.headers[ETAG_HEADER]

    def matches_request(self, request):
        """Checks if the given request acceptably matches the request which
        generated this cached response."""
        # for now we just require the "accept" header to match in order to
        # use a cached response
        return self.accept == unicode(request.accept)

    def is_not_modified(self, dispatcher):
        """Checks if the cached response is unmodified with respect to the
        given request (i.e. the cached etag matches If-None-Match)."""
        return (Dispatcher.enable_etags and
                (self.etag.strip('"') in dispatcher.request.if_none_match))

    def write_output(self, dispatcher):
        """Writes this cached response to the current response output of the
        dispatcher."""
        dispatcher.response.out.write(self.out)
        dispatcher.response.headers[CONTENT_TYPE_HEADER] = self.content_type
        if self.etag:
            dispatcher.response.headers[ETAG_HEADER] = self.etag
class Dispatcher(webapp.RequestHandler):
    """RequestHandler which presents a REST based API for interacting with
    the datastore of a Google App Engine application.

    Integrating this handler with an existing application is designed to be
    as simple as possible.  The user merely needs to configure this class
    with the relevant model instances using one of the add_model*() class
    methods.  Example usage is included in the module level documentation.

    This handler has builtin support for caching get requests using the
    memcache API.  This can be controlled via two class properties:

      caching: True to enable caching, False to disable

      cache_time: Time in seconds for results to be cached

      base_url: URL prefix expected on requests

      fetch_page_size: number of instances to return per get-all call
                       (note, App Engine has a builtin limit of 1000)

      authenticator: Authenticator which controls access to this service
                     (default allows any caller)

      authorizer: Authorizer which controls access to the data in this
                  service (default allows all access)

      output_content_types: content types which may be requested for
                            output.  the last value in the list is the
                            'default' type used when no type is requested
                            by the caller.  the only supported types are
                            currently JSON_CONTENT_TYPE and
                            XML_CONTENT_TYPE.

      include_docstring_in_schema: whether or not the docstring for a Model
                                   should be included in the schema.
                                   Defaults to False

      enable_delete_query: whether or not query based deletes are
                           supported.  Defaults to False

      enable_delete_all: whether or not 'delete all' queries are supported
                         (only used if enable_delete_query is True).
                         Defaults to False

      external_namespaces: a set of values which control how namespaces are
                           handled external to the handler.  The allowable
                           values in the set are zero or more of READ and
                           WRITE.  If READ is specified, model namespaces
                           will be included in outgoing models.  If WRITE
                           is specified, callers are allowed to specify
                           namespaces in the url (otherwise, caller
                           specified namespaces will cause a 404).
                           Regardless of these settings, the programmer is
                           free to utilize namespaces internally to the
                           application.  If namespaces are being used for
                           multi-tenancy support, this property alone is
                           not sufficient protection against data leaks (as
                           namespace information is included in keys).
                           Secure multi-tenant implementations should have
                           an Authorizer which checks all model namespaces
                           (an example NamespaceAuthorizer is provided in
                           the wiki).  Defaults to hidden external
                           namespaces (empty set)

      enable_etags: whether or not etags (and related) headers are sent and
                    honored.  if enabled, 'If-Match' will be checked on
                    sets and 'If-None-Match' will be checked on gets.
                    Defaults to False
    """

    caching = False
    cache_time = 300
    base_url = ""
    fetch_page_size = 50
    authenticator = Authenticator()
    authorizer = Authorizer()
    if not COMPAT_WEBAPP2:
        # webapp picks the default as the last option
        output_content_types = [JSON_CONTENT_TYPE, XML_CONTENT_TYPE]
    else:
        # webapp2 picks the default as the first option
        output_content_types = [XML_CONTENT_TYPE, JSON_CONTENT_TYPE]
    include_docstring_in_schema = False
    enable_delete_query = False
    enable_delete_all = False
    external_namespaces = HIDDEN_EXT_NAMESPACES
    enable_etags = False
    # REST path (xml name) -> ModelHandler, shared by all dispatcher
    # instances
    model_handlers = {}
def __init__(self, request=None, response=None):
    """Constructs the dispatcher, bridging the differing constructor
    signatures of webapp (no arguments) and webapp2 (request/response
    pair)."""
    if not COMPAT_WEBAPP2:
        super(Dispatcher, self).__init__()
    else:
        super(Dispatcher, self).__init__(request, response)
@classmethod
def add_models_from_module(cls, model_module, use_module_name=False,
                           exclude_model_types=None,
                           model_methods=ALL_MODEL_METHODS, recurse=False):
    """Adds all models from the given module to this request handler.

    The name of the Model class (with invalid characters converted to the
    '_' character) will be used as the REST path for Models of that type
    (optionally including the module name).

    REST paths which conflict with previously added paths will cause a
    KeyError.

    Args:
      model_module: a module instance or the name of a module instance
                    (e.g. use __name__ to add models from the current
                    module instance)
      use_module_name: True to include the name of the module as part of
                       the REST path for the Model, False to use the
                       Model name alone (this may be necessary if Models
                       with conflicting names are used from different
                       modules).
      exclude_model_types: optional list of Model types to be excluded
                           from the REST handler.
      model_methods: optional methods supported for the given model (one
                     or more of ['GET', 'POST', 'PUT',
                     'DELETE', 'GET_METADATA']), defaults to all methods
      recurse: True to recurse into sub-modules when searching for
               Models, False otherwise
    """
    logging.info("adding models from module %s", model_module)
    if(not exclude_model_types):
        exclude_model_types = []
    if(isinstance(model_module, basestring)):
        # resolve a module name into the module instance
        model_module = __import__(model_module)
    module_name = ""
    if(use_module_name):
        module_name = get_type_name(model_module) + "."
    for obj_name in dir(model_module):
        obj = getattr(model_module, obj_name)
        if(isinstance(obj, types.ModuleType)):
            # only import "nested" modules, otherwise we get the whole
            # world and bad things happen
            if(recurse and get_type_name(obj).startswith(
                    get_type_name(model_module) + ".")):
                cls.add_models_from_module(obj, use_module_name,
                                           exclude_model_types,
                                           model_methods, recurse)
        elif(isinstance(obj, type) and issubclass(obj, db.Model) and
             (obj not in exclude_model_types)):
            model_name = module_name + get_type_name(obj)
            cls.add_model(model_name, obj, model_methods)
@classmethod
def add_models(cls, models, model_methods=ALL_MODEL_METHODS):
    """Adds the given models from the given dict to this request handler.

    The key (with invalid characters converted to the '_' character) will
    be used as the REST path for the relevant Model value.

    REST paths which conflict with previously added paths will cause a
    KeyError.  Also, the path 'metadata' is reserved.

    Args:
      models: dict of REST path -> Model class (or a tuple of (Model
              class, [allowed methods]) )
      model_methods: default methods for entries given without an
                     explicit method list, defaults to all methods
    """
    for model_name, model_type in models.iteritems():
        # use a per-entry variable instead of overwriting model_methods:
        # previously a tuple entry's method list "leaked" into every
        # subsequent plain entry in the dict
        cur_methods = model_methods
        if is_list_type(model_type):
            # Assume we have format:
            # {model_name : (ModelClass, [model_method_1, model_method_2])}
            cur_methods = model_type[1]
            model_type = model_type[0]
        cls.add_model(model_name, model_type, cur_methods)
@classmethod
def add_model(cls, model_name, model_type,
              model_methods=ALL_MODEL_METHODS):
    """Adds the given model to this request handler.

    The name (with invalid characters converted to the '_' character) will
    be used as the REST path for relevant Model value.

    REST paths which conflict with previously added paths will cause a
    KeyError.  Also, the path 'metadata' is reserved.

    Args:
      model_name: the REST path for the given model
      model_type: the Model class
      model_methods: optional methods supported for the given model (one
                     or more of ['GET', 'POST', 'PUT', 'DELETE',
                     'GET_METADATA']), defaults to all methods

    Raises:
      ValueError: if the path is reserved or model_type is not a Model
      KeyError: if the REST path is already in use
    """
    xml_name = convert_to_valid_xml_name(model_name)
    if(xml_name == METADATA_PATH):
        raise ValueError("cannot use name %s" % METADATA_PATH)
    # check the converted name: model_handlers is keyed by xml_name, so
    # two different model_names mapping to the same xml path must also
    # conflict (previously the raw model_name was checked)
    if(xml_name in cls.model_handlers):
        raise KeyError("name %s already used" % model_name)
    if(not issubclass(model_type, db.Model)):
        raise ValueError("given model type %s is not a subclass of Model" %
                         model_type)
    cls.model_handlers[xml_name] = ModelHandler(model_name, model_type,
                                                model_methods)
    logging.info("added model %s with type %s for methods %s", model_name,
                 model_type, model_methods)
##
# Error codes used in this handler:
#
# 200 -> okay
# 204 -> noop (okay)
# 400 -> bad req (bad data, invalid properties)
# 404 -> not found (bad path)
# 405 -> method not allowed (method not supported by model)
##
    def initialize(self, request, response):
        """Sets up the per-request bookkeeping attributes used by this
        dispatcher: lazily parsed query params, the response cacheability
        flag, and the default output content type."""
        super(Dispatcher, self).initialize(request, response)
        if request:
            # parsed on demand by get_query_params()
            request.disp_query_params_ = None
        if response:
            # cleared for responses which must not be memcached (e.g. blobs)
            response.disp_cache_resp_ = True
            response.disp_out_type_ = TEXT_CONTENT_TYPE
    def get(self, *_):
        """Does a REST get, optionally using memcache to cache results.  See
        get_impl() for more details."""
        self.authenticator.authenticate(self)
        if not self.caching:
            self.get_impl()
            return
        # attempt to return cached response
        cached_response = memcache.get(self.request.url)
        if cached_response:
            cached_response = pickle.loads(cached_response)
            # only use cache response if the requests match
            if cached_response.matches_request(self.request):
                # not_modified() raises, so nothing below runs when the
                # caller's ETag is still current
                if cached_response.is_not_modified(self):
                    self.not_modified()
                cached_response.write_output(self)
                return
        self.get_impl()
        # don't cache blobinfo content requests
        if self.response.disp_cache_resp_:
            cached_response = pickle.dumps(CachedResponse(self))
            if not memcache.set(self.request.url, cached_response,
                                self.cache_time):
                # cache write failure is non-fatal; the response is built
                logging.warning("memcache set failed for %s",
                                self.request.url)
    def get_impl(self):
        """Actual implementation of REST get.  Gets metadata (types,
        schemas), or actual Model instances.
        '/metadata/*' -> See get_metadata() for details
        '/<type>[?<query>]' -> gets all Model instances of given type,
        optionally querying (200, 404)
        '/<type>/<key>' -> gets Model instance with given key (200, 404)
        '/<type>/<key>/<prop>' -> gets a single property from the Model
        instance with given key (200, 404)
        """
        path = self.split_path(1)
        model_name = path.pop(0)
        if model_name == METADATA_PATH:
            out = self.get_metadata(path)
        elif(model_name == BLOBUPLOADRESULT_PATH):
            # this is the final call from a blobinfo upload
            self.response.disp_cache_resp_ = False
            # rebuild the path so update_impl sees the result marker last
            path.append(BLOBUPLOADRESULT_PATH)
            model_name = path.pop(0)
            model_key = path.pop(0)
            self.update_impl(path, model_name, model_key, "POST", False)
            return
        else:
            model_handler = self.get_model_handler(model_name, "GET")
            model_name = model_handler.model_name
            list_props = {}
            if (len(path) > 0):
                # key given: single instance (or single property) get
                model_key = path.pop(0)
                models = model_handler.get(model_key)
                self.authorizer.can_read(self, models)
                if (len(path) > 0):
                    # single property get
                    prop_name = path.pop(0)
                    prop_handler = model_handler.get_property_handler(
                        prop_name)
                    # get_if_none_match() raises 304 when the ETag matches
                    self.get_if_none_match(model_handler, models)
                    prop_value = prop_handler.get_value(models)
                    prop_handler.value_to_response(self, prop_name, prop_value,
                                                   path)
                    return
            else:
                # no key: query for all matching instances
                models = self.get_all_impl(model_handler, list_props)
            if models is None:
                self.not_found()
            self.get_if_none_match(model_handler, models, list_props)
            out = self.models_to_xml(model_name, model_handler, models,
                                     list_props)
        self.write_output(out)
def put(self, *_):
"""Does a REST put.
'/<type>/<key>' -> completely replaces Model instance, returns key as
plain text (200, 400, 404)
"""
self.authenticator.authenticate(self)
path = self.split_path(1)
model_name = path.pop(0)
model_key = None
if (len(path) > 0):
model_key = path.pop(0)
self.update_impl(path, model_name, model_key, "PUT", True)
def post(self, *_):
"""Does a REST post, handles alternate HTTP methods specified via the
'X-HTTP-Method-Override' header"""
self.authenticator.authenticate(self)
real_method = self.request.headers.get(METHOD_OVERRIDE_HEADER, None)
if real_method:
real_method = real_method.upper()
if real_method == "PUT":
self.put()
elif real_method == "DELETE":
self.delete()
elif real_method == "POST":
self.post_impl()
elif real_method == "GET":
self.get()
else:
self.error(405)
else:
self.post_impl()
def post_impl(self, *_):
"""Actual implementation of REST post.
'/<type>' -> creates new Model instance, returns key as plain
text (200, 400, 404)
'/<type>/<key>' -> partially updates Model instance, returns key as
plain text (200, 400, 404)
"""
path = self.split_path(1)
model_name = path.pop(0)
model_key = None
if (len(path) > 0):
model_key = path.pop(0)
self.update_impl(path, model_name, model_key, "POST", False)
    def update_impl(self, path, model_name, model_key, method_name,
                    is_replace):
        """Actual implementation of all Model update methods.
        Creates/updates/replaces Model instances as specified.  Writes the
        key of the modified Model as a plain text result.
        Args:
            path: remaining request path components (may name one property)
            model_name: the REST path of the model type
            model_key: the key of the instance to update, or None to create
            method_name: 'PUT' or 'POST' (used for the method-allowed check)
            is_replace: True for full replace semantics, False for partial
        """
        model_handler = self.get_model_handler(model_name, method_name)
        model_name = model_handler.model_name
        is_list = False
        models = []
        if((not is_replace) and (len(path) > 0)):
            # single property update
            prop_name = path.pop(0)
            if(prop_name == KEY_PROPERTY_NAME):
                raise KeyError("Property %s is not modifiable" %
                               KEY_PROPERTY_NAME)
            model = model_handler.get(model_key)
            prop_handler = model_handler.get_property_handler(prop_name)
            prop_handler.value_from_request(self, model, path)
            models.append(model)
        else:
            # whole-model update: parse one model, or a <list> of models
            doc = self.input_to_xml()
            model_els = [(model_key, doc.documentElement)]
            if(str(doc.documentElement.nodeName) == LIST_EL_NAME):
                is_list = True
                model_els = []
                for node in doc.documentElement.childNodes:
                    if(node.nodeType == node.ELEMENT_NODE):
                        # keys for list entries come from the xml itself
                        model_els.append((MULTI_UPDATE_KEY, node))
            try:
                for model_el_key, model_el in model_els:
                    models.append(self.model_from_xml(
                        model_el, model_name, model_handler, model_el_key,
                        is_replace))
            except Exception:
                # any parse failure becomes a 400 (bad request)
                logging.exception("failed parsing model")
                raise DispatcherException(400)
            finally:
                doc.unlink()
        if is_list:
            models = self.authorizer.filter_write(self, models, is_replace)
        elif (len(models) > 0):
            self.authorizer.can_write(self, models[0], is_replace)
        # update_if_match() raises 412 on an ETag mismatch
        self.update_if_match(model_handler, models)
        for model in models:
            ModelHandler.put(model)
        self.get_if_none_match(model_handler, models)
        # if input was not a list, convert single element models list back to
        # single element
        if(not is_list):
            models = models[0]
        # note, we specifically look in the query string (don't try to parse
        # the POST body)
        resp_type = self.get_query_param(QUERY_TYPE_PARAM)
        if (resp_type == QUERY_TYPE_FULL):
            self.write_output(self.models_to_xml(model_name, model_handler,
                                                 models))
        elif ((resp_type == QUERY_TYPE_STRUCTURED) or
              (resp_type == QUERY_TYPE_XML) or
              self.request_structured_output()):
            self.write_output(self.keys_to_xml(model_handler, models))
        else:
            self.write_output(self.keys_to_text(models))
def request_structured_output(self):
"""Returns True if the request explicitly requested structured output
(xml or json) via the accept header."""
out_mime_type = unicode(self.request.accept)
return ((out_mime_type == XML_CONTENT_TYPE) or
(out_mime_type == JSON_CONTENT_TYPE))
    def delete(self, *_):
        """Does a REST delete.
        '/<type>/<key>' -> delete Model instance w/ given key (200, 204)
        '/<type>[?<query>]' -> deletes all Model instances of given type,
        optionally querying (200, 204)
        """
        self.authenticator.authenticate(self)
        path = self.split_path(1)
        model_name = path.pop(0)
        model_handler = self.get_model_handler(model_name, "DELETE", 204)
        model_name = model_handler.model_name
        model_key = None
        model_query = None
        if (len(path) > 0):
            model_key = path.pop(0)
        else:
            # no key given: this is a (possibly filtered) bulk delete
            model_query = ModelQuery()
            model_query.parse(self, model_handler)
            # verify that this type of delete is supported
            if(not self.enable_delete_query):
                logging.warning("query based deletes are currently disabled,"
                                " see 'enable_delete_query' property")
                raise DispatcherException(404)
            elif((model_query.query_expr is None) and
                 (not self.enable_delete_all)):
                logging.warning("'delete all' deletes are currently disabled,"
                                " see 'enable_delete_all' property")
                raise DispatcherException(404)
        try:
            if (model_key is not None):
                model_key = db.Key(model_key)
                self.authorizer.can_delete(self, model_handler.model_type,
                                           model_key)
                # update_if_match() raises 412 on an ETag mismatch
                self.update_if_match(model_handler, None, (model_key,))
                db.delete(model_key)
            else:
                model_query.query_expr = self.authorizer.check_delete_query(
                    self, model_query.query_expr, model_query.query_params)
                model_handler.delete_all(model_query)
        except Exception, ex:
            if(isinstance(ex, DispatcherException) and (ex.error_code == 412)):
                # we want to throw pre-condition failures
                raise ex
            # all other failures are deliberately swallowed: delete always
            # answers 204 (no content) below
            logging.warning("delete failed", exc_info=1)
        self.error(204)
    def get_metadata(self, path):
        """Actual implementation of metadata retrieval.
        '/metadata' -> gets list of all Model types
        '/metadata/<type>' -> gets XML Schema for the given type (200, 404)
        Args:
            path: remaining path components (zero or one model name)
        Returns:
            the serialized metadata document (xml or json)
        """
        model_name = None
        if (len(path) > 0):
            model_name = path.pop(0)
        impl = minidom.getDOMImplementation()
        doc = None
        try:
            if model_name:
                # schema for one specific model type
                model_handler = self.get_model_handler(model_name,
                                                       "GET_METADATA")
                model_name = model_handler.model_name
                self.authorizer.can_read_metadata(self, model_name)
                doc = impl.createDocument(XSD_NS, XSD_SCHEMA_NAME, None)
                doc.documentElement.attributes[XSD_ATTR_XMLNS] = XSD_NS
                model_handler.write_xsd_metadata(doc.documentElement,
                                                 model_name)
            else:
                # list of all (readable) model types
                doc = impl.createDocument(None, TYPES_EL_NAME, None)
                types_el = doc.documentElement
                model_names = self.authorizer.filter_read_metadata(
                    self, list(self.model_handlers.iterkeys()))
                for model_name in model_names:
                    append_child(types_el, TYPE_EL_NAME, model_name)
            return self.doc_to_output(doc)
        finally:
            # free the minidom tree promptly
            if doc:
                doc.unlink()
    def get_all_impl(self, model_handler, list_props):
        """Actual implementation of REST query.  Gets Model instances based
        on criteria specified in the query parameters.
        Args:
            model_handler: the ModelHandler for the queried type
            list_props: out-param dict; receives the next fetch offset
                        under QUERY_OFFSET_PARAM for paging
        Returns:
            the (authorization filtered) matching Model instances
        """
        model_query = ModelQuery()
        model_query.parse(self, model_handler)
        # let the authorizer rewrite/constrain the query before running it
        model_query.query_expr = self.authorizer.check_query(
            self, model_query.query_expr, model_query.query_params)
        models = model_handler.get_all(model_query)
        list_props[QUERY_OFFSET_PARAM] = model_query.next_fetch_offset
        models = self.authorizer.filter_read(self, models)
        return models
def split_path(self, min_comps):
"""Returns the request path split into non-empty components."""
path = self.request.path
if(path.startswith(self.base_url)):
path = path[len(self.base_url):]
path = [i for i in path.split('/') if i]
if(len(path) < min_comps):
raise DispatcherException(404)
return path
    def get_model_handler(self, model_name, method_name, failure_code=404):
        """Returns the ModelHandler registered under the given name.
        Raises DispatcherException with the given failure_code when no
        handler exists for the name, and 405 when the handler exists but
        does not allow the given method.  (Never returns None.)"""
        # see if namespace was specified, e.g. "<ns>.<model_name>"
        ns_model_name = model_name.rpartition(".")
        if(ns_model_name[0]):
            # only set namespace if callers are allowed to
            if WRITE_EXT_NS not in self.external_namespaces:
                raise DispatcherException(404)
            namespace_manager.set_namespace(ns_model_name[0])
            # remove namespace from model name
            model_name = ns_model_name[2]
        try:
            model_handler = self.model_handlers[model_name]
        except KeyError:
            logging.error("invalid model name %s", model_name, exc_info=1)
            raise DispatcherException(failure_code)
        # handler found, but the HTTP method may not be enabled for it
        if method_name not in model_handler.model_methods:
            raise DispatcherException(405)
        return model_handler
def doc_to_output(self, doc):
"""Returns the given xml doc serialized using the appropriate
response format."""
out_mime_type = self.request.accept.best_match(
self.output_content_types)
if(out_mime_type == JSON_CONTENT_TYPE):
self.response.disp_out_type_ = JSON_CONTENT_TYPE
return xml_to_json(doc)
self.response.disp_out_type_ = XML_CONTENT_TYPE
return doc.toxml(XML_ENCODING)
def input_to_xml(self):
"""Returns the request doc converted into an xml doc."""
content_type = self.request.headers.get(CONTENT_TYPE_HEADER, None)
if((content_type != None) and
content_type.startswith(JSON_CONTENT_TYPE)):
return json_to_xml(self.request.body_file)
return minidom.parse(self.request.body_file)
    def models_to_xml(self, model_name, model_handler, models,
                      list_props=None):
        """Returns a string of xml of the given models (may be list or single
        instance).
        Args:
            model_name: element name to use for each model
            model_handler: the ModelHandler for the model type
            models: a single model or a list of models
            list_props: optional dict which may carry the paging offset to
                        attach to the list element
        """
        # optional query params controlling blob rendering / field selection
        blob_info_format = self.get_query_param(QUERY_BLOBINFO_PARAM,
                                                QUERY_BLOBINFO_TYPE_KEY)
        include_props = self.get_query_param(QUERY_INCLUDEPROPS_PARAM)
        if(include_props is not None):
            include_props = include_props.split(",")
        impl = minidom.getDOMImplementation()
        doc = None
        try:
            if is_list_type(models):
                doc = impl.createDocument(None, LIST_EL_NAME, None)
                list_el = doc.documentElement
                if((list_props is not None) and
                   (QUERY_OFFSET_PARAM in list_props)):
                    # expose the next-page offset so callers can continue
                    list_el.attributes[QUERY_OFFSET_PARAM] = (
                        list_props[QUERY_OFFSET_PARAM])
                for model in models:
                    model_el = append_child(list_el, model_name)
                    model_handler.write_xml_value(
                        model_el, model, blob_info_format, include_props)
            else:
                doc = impl.createDocument(None, model_name, None)
                model_handler.write_xml_value(doc.documentElement, models,
                                              blob_info_format, include_props)
            return self.doc_to_output(doc)
        finally:
            # free the minidom tree promptly
            if doc:
                doc.unlink()
def get_if_none_match(self, model_handler, models, list_props=None):
"""Handles the 'If-None-Match' header for retrieving data, either
setting the outgoing ETag header or returning the not modified
response code as appropriate. Does nothing if etag support is not
enabled."""
if not self.enable_etags:
return
model_hash = self.models_to_hash(model_handler, models, list_props)
if model_hash in self.request.if_none_match:
self.not_modified()
self.response.headers[ETAG_HEADER] = '"%s"' % model_hash
    def update_if_match(self, model_handler, models, model_keys=None):
        """Handles the 'If-Match' header for modifying data, either allowing
        the update to proceed or returning the precondition failed response
        code as appropriate.  Does nothing if etag support is not enabled.
        Args:
            model_handler: the ModelHandler for the model type
            models: the models about to be written (may be None when
                    model_keys is supplied instead)
            model_keys: optional keys to resolve into models first
        """
        if not self.enable_etags:
            return
        if('*' not in self.request.if_match):
            # caller provided an if-match header (which may be a per-model
            # list or an all-collection value)
            if(model_keys is not None):
                # first need to convert keys to models
                models = []
                for model_key in model_keys:
                    model = model_handler.get(model_key)
                    if model:
                        models.append(model)
            if(self.models_to_hash(model_handler, models) in
               self.request.if_match):
                # provided per-collection header, which matches
                return
            for model in models:
                if(model_hash_to_str(ModelHandler.hash_model(model)) not in
                   self.request.if_match):
                    # provided per-model header, which does not match
                    # (is_modified() raises a 412 precondition failure)
                    self.is_modified()
        elif(models is not None):
            # see if caller provided per-model etags in the models themselves
            # (as attributes)
            for model in models:
                if(hasattr(model, "in_model_hash_") and
                   (model.in_model_hash_ !=
                    model_hash_to_str(ModelHandler.hash_model(model)))):
                    # provided per-model attribute, which does not match
                    self.is_modified()
def models_to_hash(self, model_handler, models, list_props=None):
"""Returns an aggregate hash for a collection of models, including any
query offset parameter if available."""
model_hash = 0
if is_list_type(models):
if((list_props is not None) and
(QUERY_OFFSET_PARAM in list_props)):
model_hash = model_hash ^ hash(list_props[QUERY_OFFSET_PARAM])
for model in models:
model_hash = model_hash ^ ModelHandler.hash_model(model)
else:
model_hash = model_hash ^ ModelHandler.hash_model(models)
return model_hash_to_str(model_hash)
    def keys_to_xml(self, model_handler, models):
        """Returns a string of xml of the keys of the given models (may be
        list or single instance)."""
        impl = minidom.getDOMImplementation()
        doc = None
        try:
            if is_list_type(models):
                # one <key> child per model inside a list element
                doc = impl.createDocument(None, LIST_EL_NAME, None)
                list_el = doc.documentElement
                for model in models:
                    append_child(list_el, KEY_PROPERTY_NAME,
                                 model_handler.key_handler.get_value_as_string(
                                     model))
            else:
                # single model: the key element is the document root
                doc = impl.createDocument(None, KEY_PROPERTY_NAME, None)
                doc.documentElement.appendChild(doc.createTextNode(
                    model_handler.key_handler.get_value_as_string(models)))
            return self.doc_to_output(doc)
        finally:
            # free the minidom tree promptly
            if doc:
                doc.unlink()
def keys_to_text(self, models):
"""Returns a string of text of the keys of the given models (may be
list or single instance)."""
if(not is_list_type(models)):
models = [models]
return unicode(",".join([str(model.key()) for model in models]))
    def model_from_xml(self, model_el, model_name, model_handler, key,
                       is_replace):
        """Returns a model instance updated from the given model xml
        element.
        Args:
            model_el: the xml element carrying the model data
            model_name: expected element name (mismatch raises TypeError)
            model_handler: the ModelHandler for the model type
            key: the request key, MULTI_UPDATE_KEY (key taken from the xml),
                 or None to create a new instance
            is_replace: True to reset properties absent from the xml
        """
        if(model_name != str(model_el.nodeName)):
            raise TypeError("wrong model name, found '%s', expected '%s'" %
                            (model_el.nodeName, model_name))
        props = model_handler.read_xml_value(model_el)
        given_key = props.pop(KEY_PROPERTY_NAME, None)
        if(key is MULTI_UPDATE_KEY):
            # batch update entries carry their key (if any) in the xml
            if(given_key):
                key = str(given_key)
            else:
                key = None
        if(key):
            key = db.Key(key.strip())
            # a key in the body must agree with the key in the request url
            if(given_key and (given_key != key)):
                raise ValueError(
                    "key in data %s does not match request key %s" %
                    (given_key, key))
            model = model_handler.get(key)
            if(is_replace):
                # full replace: reset any property not present in the input
                for prop_name, prop_type in model.properties().iteritems():
                    if(prop_name not in props):
                        setattr(model, prop_name, prop_type.default_value())
                for prop_name in model.dynamic_properties():
                    delattr(model, prop_name)
            for prop_name, prop_value in props.iteritems():
                setattr(model, prop_name, prop_value)
        else:
            # no key: create a fresh instance
            model = model_handler.create(props)
        # check for model specific etag attribute
        if(self.enable_etags and
           (model_el.attributes.get(ETAG_ATTR_NAME, None) is not None)):
            model.in_model_hash_ = str(
                model_el.attributes[ETAG_ATTR_NAME].value)
        return model
def write_output(self, out):
"""Writes the output to the response."""
if out:
content_type = self.response.disp_out_type_
out_suffix = None
if(content_type == JSON_CONTENT_TYPE):
# check for json callback
callback = self.get_query_param(QUERY_CALLBACK_PARAM)
if callback:
self.response.out.write(callback)
self.response.out.write("(")
out_suffix = ");"
self.response.headers[CONTENT_TYPE_HEADER] = content_type
self.response.out.write(out)
if out_suffix:
self.response.out.write(out_suffix)
    def serve_blob(self, blob_info):
        """Serves a BlobInfo response."""
        self.response.clear()
        # blob bodies must never be stored in memcache
        self.response.disp_cache_resp_ = False
        content_type_preferred = None
        if blob_info:
            # return actual blob
            range_header = self.request.headers.get(RANGE_HEADER, None)
            if range_header is not None:
                self.response.headers[blobstore.BLOB_RANGE_HEADER] = (
                    range_header)
            # NOTE(review): the blobstore infrastructure is expected to
            # substitute the blob content for the body based on this header
            self.response.headers[blobstore.BLOB_KEY_HEADER] = (
                str(blob_info.key()))
            content_type_preferred = blob_info.content_type
        self.set_response_content_type(BINARY_CONTENT_TYPE,
                                       content_type_preferred)
    def upload_blob(self, path, model, blob_prop_name):
        """Handles a BlobInfo upload to the property with the given name of
        the given model instance.
        The upload is a three-step dance: (1) a non-form request returns an
        upload form pointing at a blobstore url, (2) the form post stores
        the blob, updates the model and redirects, (3) the redirected GET
        (marked with BLOBUPLOADRESULT_PATH) returns the final result.
        """
        if(len(path) > 0):
            if(path.pop(0) != BLOBUPLOADRESULT_PATH):
                raise DispatcherException(404)
            # final leg of a blob upload, no modifications left to make, just
            # return model result
            return
        # set blobinfo contents
        content_type = self.request.headers.get(CONTENT_TYPE_HEADER, None)
        if(not content_type.startswith(FORMDATA_CONTENT_TYPE)):
            # pre-authorize the upload
            self.authorizer.can_write_blobinfo(self, model, blob_prop_name)
            # need to return upload form
            redirect_url = self.request.path
            if self.request.query_string:
                redirect_url += "?" + self.request.query_string
            form_url = blobstore.create_upload_url(redirect_url)
            self.response.out.write('<html><body>')
            self.response.out.write(
                '<form action="%s" method="POST" enctype="%s">' %
                (form_url, FORMDATA_CONTENT_TYPE))
            self.response.out.write(
                """Upload File: <input type="file" name="file"><br> """
                """<input type="submit" name="submit" value="Submit"> """
                """</form></body></html>""")
            # error_code None: the response above is the complete answer
            raise DispatcherException()
        else:
            # upload completed, update the model
            blob_key = None
            for key, value in self.request.params.items():
                if((key == "file") and isinstance(value, cgi.FieldStorage)):
                    if 'blob-key' in value.type_options:
                        blob_key = blobstore.parse_blob_info(value).key()
            if blob_key is None:
                raise ValueError("Blob upload failed")
            setattr(model, blob_prop_name, blob_key)
            # authorize the update, post upload.  we need to do this here,
            # because we have to return a redirect now (the final result is
            # not returned until after the redirect)
            self.authorizer.can_write(self, model, False)
            ModelHandler.put(model)
            # redirect will be a GET, so we need to send the caller to a
            # special url, so they can get output which looks like what would
            # normally result from an update call
            result_url = (self.base_url + "/" + BLOBUPLOADRESULT_PATH +
                          self.request.path[len(self.base_url):])
            if self.request.query_string:
                result_url += "?" + self.request.query_string
            self.redirect(result_url)
            raise DispatcherException()
    def handle_exception(self, exception, debug_mode):
        """Central exception hook: a DispatcherException carries its own
        status code (or None when the thrower already configured the
        response); any other exception falls through to the framework's
        default handler."""
        if(isinstance(exception, DispatcherException)):
            # if None, assume thrower has configured the response appropriately
            if(exception.error_code is not None):
                if(exception.error_code < 400):
                    # non-error status (e.g. 304): clear the body, set status
                    self.response.clear()
                    self.response.set_status(exception.error_code)
                else:
                    self.error(exception.error_code)
        else:
            super(Dispatcher, self).handle_exception(exception, debug_mode)
def get_query_params(self):
"""Returns the parsed request url query params as a dict."""
# lazy (re)parse query params
if(self.request.disp_query_params_ is None):
self.request.disp_query_params_ = cgi.parse_qs(
self.request.query_string)
return self.request.disp_query_params_
def get_query_param(self, key, default=None):
"""Returns the request url query param for the given key, defaulting
to the given default if not found."""
value = self.get_query_params().get(key, None)
if(value is None):
return default
return value[0]
def set_response_content_type(self, content_type_default,
content_type_preferred=None):
"""Sets the response content-type header based on the request and the
given info."""
content_type = content_type_preferred
if((not content_type) or (content_type.find("*") >= 0)):
content_type = self.request.accept.best_matches()[0]
if((not content_type) or (content_type.find("*") >= 0)):
content_type = content_type_default
self.response.headers[CONTENT_TYPE_HEADER] = content_type
self.response.disp_out_type_ = content_type
def forbidden(self):
"""Convenience method which raises a DispatcherException with a 403
error code."""
raise DispatcherException(403)
def not_found(self):
"""Convenience method which raises a DispatcherException with a 404
error code."""
raise DispatcherException(404)
def not_modified(self):
"""Convenience method which raises a DispatcherException with a 304
status code."""
self.response.headers[CONTENT_TYPE_HEADER] = TEXT_CONTENT_TYPE
raise DispatcherException(304)
def is_modified(self):
"""Convenience method which raises a DispatcherException with a 412
error code."""
raise DispatcherException(412)
| apache-2.0 |
armikhael/software-center | test/gtk3/test_views.py | 1 | 1845 | #!/usr/bin/python
from gi.repository import Gtk, GObject
import sys
import unittest
sys.path.insert(0,"../..")
sys.path.insert(0,"..")
#from mock import Mock
TIMEOUT=300
import softwarecenter.paths
softwarecenter.paths.datadir = "../data"
class TestViews(unittest.TestCase):
    """Smoke tests for the gtk3 views: each test builds the view's test
    window, runs the Gtk main loop briefly, then destroys the window.
    The repeated timeout/main-loop boilerplate is factored into a helper.
    """

    def _run_window(self, win):
        """Schedules destruction of win after TIMEOUT ms and spins the
        Gtk main loop.
        NOTE(review): assumes the test windows end the main loop when
        destroyed -- confirm against the get_test_window_* factories.
        """
        GObject.timeout_add(TIMEOUT, lambda: win.destroy())
        Gtk.main()

    def test_viewswitcher(self):
        from softwarecenter.ui.gtk3.panes.viewswitcher import get_test_window_viewswitcher
        self._run_window(get_test_window_viewswitcher())

    def test_catview(self):
        from softwarecenter.ui.gtk3.views.catview_gtk import get_test_window_catview
        self._run_window(get_test_window_catview())

    def test_appdetails(self):
        from softwarecenter.ui.gtk3.views.appdetailsview_gtk import get_test_window_appdetails
        self._run_window(get_test_window_appdetails())

    def test_pkgsnames(self):
        from softwarecenter.ui.gtk3.views.pkgnamesview import get_test_window_pkgnamesview
        self._run_window(get_test_window_pkgnamesview())

    def test_purchaseview(self):
        from softwarecenter.ui.gtk3.views.purchaseview import get_test_window_purchaseview
        self._run_window(get_test_window_purchaseview())

    def test_appview(self):
        from softwarecenter.ui.gtk3.views.appview import get_test_window
        self._run_window(get_test_window())
if __name__ == "__main__":
    import logging
    # verbose logging helps diagnose window construction failures
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| gpl-3.0 |
MatthewWilkes/django | django/contrib/gis/gdal/datasource.py | 357 | 4777 | """
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
from ctypes import byref
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
    "Wraps an OGR Data Source object."

    def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
        # The write flag (OGR expects an integer flag).
        if write:
            self._write = 1
        else:
            self._write = 0
        # See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
        self.encoding = encoding

        Driver.ensure_registered()

        if isinstance(ds_input, six.string_types):
            # The data source driver is a void pointer.
            ds_driver = Driver.ptr_type()
            try:
                # OGROpen will auto-detect the data source type.
                ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
            except GDALException:
                # Making the error message more clear rather than something
                # like "Invalid pointer returned from OGROpen".
                raise GDALException('Could not open the datasource at "%s"' % ds_input)
        elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
            ds = ds_input
        else:
            raise GDALException('Invalid data source input type: %s' % type(ds_input))

        if ds:
            self.ptr = ds
            self.driver = Driver(ds_driver)
        else:
            # Raise an exception if the returned pointer is NULL
            raise GDALException('Invalid data source file "%s"' % ds_input)

    def __del__(self):
        "Destroys this DataStructure object."
        # Guard with getattr: if __init__ raised before the pointer was
        # assigned, _ptr does not exist and __del__ must not raise.
        if getattr(self, '_ptr', None) and capi:
            capi.destroy_ds(self._ptr)

    def __iter__(self):
        "Allows for iteration over the layers in a data source."
        for i in range(self.layer_count):
            yield self[i]

    def __getitem__(self, index):
        "Allows use of the index [] operator to get a layer at the index."
        if isinstance(index, six.string_types):
            # Lookup by layer name.
            layer = capi.get_layer_by_name(self.ptr, force_bytes(index))
            if not layer:
                raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
        elif isinstance(index, int):
            # Lookup by position.
            if index < 0 or index >= self.layer_count:
                raise OGRIndexError('index out of range')
            layer = capi.get_layer(self._ptr, index)
        else:
            raise TypeError('Invalid index type: %s' % type(index))
        return Layer(layer, self)

    def __len__(self):
        "Returns the number of layers within the data source."
        return self.layer_count

    def __str__(self):
        "Returns OGR GetName and Driver for the Data Source."
        return '%s (%s)' % (self.name, str(self.driver))

    @property
    def layer_count(self):
        "Returns the number of layers in the data source."
        return capi.get_layer_count(self._ptr)

    @property
    def name(self):
        "Returns the name of the data source."
        name = capi.get_ds_name(self._ptr)
        return force_text(name, self.encoding, strings_only=True)
| bsd-3-clause |
digdoritos/gimp | plug-ins/pygimp/gimpplugin.py | 17 | 1993 | # Gimp-Python - allows the writing of Gimp plugins in Python.
# Copyright (C) 1997 James Henstridge <james@daa.com.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# plugin.py -- helper for writing gimp plugins
# Copyright (C) 1997, James Henstridge.
#
# This is a small wrapper that makes plugins look like an object class that
# you can derive to create your plugin. With this wrapper, you are pretty
# much responsible for doing everything (checking run_mode, gui, etc). If
# you want to write a quick plugin, you probably want the gimpfu module.
#
# A plugin using this module would look something like this:
#
# import gimp, gimpplugin
#
# pdb = gimp.pdb
#
# class myplugin(gimpplugin.plugin):
# def query(self):
# gimp.install_procedure("plug_in_mine", ...)
#
# def plug_in_mine(self, par1, par2, par3,...):
# do_something()
#
# if __name__ == '__main__':
# myplugin().start()
import gimp
class plugin:
    """Base class for gimp plugins.

    Derive from this class and implement query() plus one method per
    installed procedure; start() hands control to gimp, which calls back
    into init/quit/query/_run as needed.
    """
    def start(self):
        """Hands control to gimp's main loop with this plugin's hooks."""
        gimp.main(self.init, self.quit, self.query, self._run)

    def init(self):
        pass

    def quit(self):
        pass

    def query(self):
        pass

    def _run(self, name, params):
        """Dispatches a procedure call to the method of the same name,
        raising AttributeError when the plugin does not define it."""
        if hasattr(self, name):
            # getattr(...)(*params) replaces the deprecated apply() builtin
            return getattr(self, name)(*params)
        else:
            raise AttributeError(name)
if __name__ == '__main__':
    # when executed directly, hand control to gimp with the base (no-op) plugin
    plugin().start()
| gpl-3.0 |
mlaitinen/odoo | openerp/addons/base/res/__init__.py | 384 | 1261 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_font
import res_company
import res_users
import res_request
import res_lang
import ir_property
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jbigham1/shpescape | shapeft/models.py | 18 | 1495 | import random
import md5
from django.db import models
from ft_auth.models import OAuthAccessToken
# Human-readable labels for shapeUpload.status values.  The code-1 label
# contains a "%s" placeholder that get_status() fills with the number of
# uploads ahead in the queue.  (No code 5 is defined.)
STATUS_CODES = {
    1 : 'In Queue (%s ahead of you)',
    2 : 'Initial Processing',
    3 : 'Importing into Fusion Tables',
    4 : 'Complete',
    6 : 'Error'
}
class shapeUpload(models.Model):
    """An upload -- includes location of initial shape, processing status, etc"""
    auth_token = models.ForeignKey(OAuthAccessToken)
    # uid is a salted md5 digest assigned on first save; see save().
    uid = models.CharField(max_length=250)
    shapefile = models.CharField(max_length=250)
    # status is a STATUS_CODES key; status_msg carries optional detail.
    status = models.IntegerField()
    status_msg = models.CharField(max_length=250, null=True)
    total_rows = models.IntegerField(null=True)
    rows_processed = models.IntegerField(null=True)
    rows_imported = models.IntegerField(null=True)
    ft_table_id = models.IntegerField(null=True)
    uploaded = models.DateTimeField(auto_now_add=True)
    create_simplify = models.BooleanField(default=True)
    create_centroid = models.BooleanField(default=True)
    create_centroid_poly = models.BooleanField(default=False)

    def get_title(self):
        """Return the shapefile's base name (last path component)."""
        return self.shapefile.split('/')[-1]

    def get_status(self):
        """Return a human-readable status string.

        Queued uploads (status 1) embed the number of other queued uploads
        via the "%s" placeholder in STATUS_CODES[1].
        """
        status = STATUS_CODES[self.status]
        if self.status == 1:
            queue_length = shapeUpload.objects.filter(status=1).count()
            status = status % (queue_length - 1)
        return status

    def save(self, *args, **kwargs):
        """Save the row, assigning a salted-md5 uid on first insert.

        Bug fix: the override previously took no arguments, silently breaking
        Django options such as save(using=...) or save(force_insert=True);
        they are now accepted and forwarded.
        """
        salt = 'shapebar'
        if not self.id:
            # First save obtains the primary key needed to derive the uid.
            super(shapeUpload, self).save(*args, **kwargs)
            # The second write must UPDATE the row just inserted.
            kwargs.pop('force_insert', None)
            digest = md5.new(salt + str(self.id))  # renamed: don't shadow builtin hash()
            self.uid = digest.hexdigest()
        super(shapeUpload, self).save(*args, **kwargs)
| apache-2.0 |
ViralLeadership/numpy | numpy/doc/structured_arrays.py | 72 | 11442 | """
=================
Structured Arrays
=================
Introduction
============
Numpy provides powerful capabilities to create arrays of structured datatype.
These arrays permit one to manipulate the data by named fields. A simple
example will show what is meant.: ::
>>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a structure that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second structure: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
>>> y = x['foo']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the structured type. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the structured array, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument.
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information. The fields are
given the default names 'f0', 'f1', 'f2' and so on.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
Numpy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
By using strings to define the record structure, it precludes being
able to name the fields in the original definition. The names can
be changed as shown later, however.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for.::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, it takes a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
Record Arrays
=============
For convenience, numpy provides "record arrays" which allow one to access
fields of structured arrays by attribute rather than by index. Record arrays
are structured arrays wrapped using a subclass of ndarray,
:class:`numpy.recarray`, which allows field access by attribute on the array
object, and record arrays also use a special datatype, :class:`numpy.record`,
which allows field access by attribute on the individual elements of the array.
The simplest way to create a record array is with :func:`numpy.rec.array`: ::
>>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3.0, 'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
'World'
numpy.rec.array can convert a wide variety of arguments into record arrays,
including normal structured arrays: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate :ref:`view`: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type `np.recarray` will automatically
convert to `np.record` datatype, so the dtype can be left out of the view: ::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type: ::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<type 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
may still be accessed by index.
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
vsajip/django | django/contrib/gis/db/backends/spatialite/creation.py | 7 | 5703 | import os
from django.conf import settings
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
    """SQLite test-database creation with SpatiaLite spatial metadata loaded."""

    def create_test_db(self, verbosity=1, autoclobber=False):
        """
        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.

        This method is overloaded to load up the SpatiaLite initialization
        SQL prior to calling the `syncdb` command.
        """
        # Don't import django.core.management if it isn't needed.
        from django.core.management import call_command

        test_database_name = self._get_test_db_name()

        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print("Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr))

        self._create_test_db(verbosity, autoclobber)

        self.connection.close()
        self.connection.settings_dict["NAME"] = test_database_name

        # Need to load the SpatiaLite initialization SQL before running `syncdb`.
        self.load_spatialite_sql()

        # Report syncdb messages at one level lower than that requested.
        # This ensures we don't get flooded with messages during testing
        # (unless you really ask to be flooded)
        call_command('syncdb',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias,
            load_initial_data=False)

        # We need to then do a flush to ensure that any data installed by
        # custom SQL has been removed. The only test data should come from
        # test fixtures, or autogenerated from post_syncdb triggers.
        # This has the side effect of loading initial data (which was
        # intentionally skipped in the syncdb).
        call_command('flush',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias)

        # get_cache and BaseDatabaseCache are imported at module level; the
        # duplicate function-local imports that used to live here were
        # redundant and have been removed.
        for cache_alias in settings.CACHES:
            cache = get_cache(cache_alias)
            if isinstance(cache, BaseDatabaseCache):
                call_command('createcachetable', cache._table, database=self.connection.alias)

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database.
        cursor = self.connection.cursor()

        return test_database_name

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            db_table = model._meta.db_table

            # Register the geometry column with SpatiaLite's metadata tables.
            output.append(style.SQL_KEYWORD('SELECT ') +
                          style.SQL_TABLE('AddGeometryColumn') + '(' +
                          style.SQL_TABLE(gqn(db_table)) + ', ' +
                          style.SQL_FIELD(gqn(f.column)) + ', ' +
                          style.SQL_FIELD(str(f.srid)) + ', ' +
                          style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                          style.SQL_KEYWORD(str(f.dim)) + ', ' +
                          style.SQL_KEYWORD(str(int(not f.null))) +
                          ');')

            if f.spatial_index:
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('CreateSpatialIndex') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ');')

        return output

    def load_spatialite_sql(self):
        """
        This routine loads up the SpatiaLite SQL file.
        """
        if self.connection.ops.spatial_version[:2] >= (3, 0):
            # Spatialite >= 3.0.x -- No need to load any SQL file, calling
            # InitSpatialMetaData() transparently creates the spatial metadata
            # tables
            cur = self.connection._cursor()
            cur.execute("SELECT InitSpatialMetaData()")
        else:
            # Spatialite < 3.0.x -- Load the initial SQL
            # Getting the location of the SpatiaLite SQL file, and confirming
            # it exists.
            spatialite_sql = self.spatialite_init_file()
            if not os.path.isfile(spatialite_sql):
                raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
                                           'SQL file (necessary for testing): %s' % spatialite_sql)

            # Opening up the SpatiaLite SQL initialization file and executing
            # as a script.
            with open(spatialite_sql, 'r') as sql_fh:
                cur = self.connection._cursor()
                cur.executescript(sql_fh.read())

    def spatialite_init_file(self):
        # SPATIALITE_SQL may be placed in settings to tell GeoDjango
        # to use a specific path to the SpatiaLite initialization SQL.
        return getattr(settings, 'SPATIALITE_SQL',
                       'init_spatialite-%s.%s.sql' %
                       self.connection.ops.spatial_version[:2])
| bsd-3-clause |
David-Levinthal/gooda | gooda-analyzer/gooda_sum.py | 2 | 8855 | #!/usr/bin/python
"""Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
"""
"""Generate difference spreadsheet of 2 Gooda function_hotspot.csv
or process.csv files.
"""
__author__ = 'tejohnson@google.com (Teresa Johnson)'
import sys
import re
import copy
class Error(Exception):
    """Raised when a Gooda CSV file has an unexpected shape or content."""
class Spreadsheet(object):
    """A parsed Gooda CSV file.

    Attributes:
      header_dict: header key -> raw header row (list of cell strings).
      header_order: header keys in file order, used to reprint headers.
      entriesL1_dict: level-1 entry key -> raw data row.
      entriesL2_dict: level-1 entry key -> {level-2 key -> raw data row}.
      type_info: SpreadsheetTypeInfo describing this file's column layout.
    """
    __slots__ = ('header_dict','header_order','entriesL1_dict',
                 'entriesL2_dict','type_info')

    def __init__(self):
        self.header_dict = {}
        self.header_order = []
        self.entriesL1_dict = {}
        self.entriesL2_dict = {}
        self.type_info = None

    # The helpers below simply delegate to type_info, which knows which
    # columns hold the keys for this spreadsheet flavour.
    def HeaderKey(self, list):
        return self.type_info.HeaderKey(list)

    def EntryKeyL1(self, list):
        return self.type_info.EntryKeyL1(list)

    def EntryKeyL2(self, list):
        return self.type_info.EntryKeyL2(list)

    def GSKey(self, list):
        return self.type_info.GSKey(list)

    def FirstCol(self):
        # Index of the first data (sample-count) column.
        return self.type_info.first_data_index

    def LastCol(self):
        # Index of the last data column (the trailing CSV column is empty).
        return self.type_info.last_data_index
class SpreadsheetTypeInfo(object):
    """Column-layout metadata for the two Gooda CSV flavours.

    A function_hotspot.csv header carries '"Function Name"' in column 3,
    while a process.csv header carries '"Process Path"' in column 1.
    """
    __slots__ = ('header_key_index','global_sample_key_index','first_data_index',
                 'last_data_index','func_index','process_index','module_index')

    def __init__(self, header_list):
        if header_list[3] == '"Function Name"':
            # function_hotspot.csv layout.
            self.header_key_index = 3
            self.first_data_index = 8
            self.func_index = 3
            self.process_index = 7
            self.module_index = 6
            expected = ((self.module_index, '"Module"'),
                        (self.process_index, '"Process"'))
            for idx, label in expected:
                if header_list[idx] != label:
                    raise Error('Did not find %s at index %d in %s'
                                % (label, idx, header_list))
        elif header_list[1] == '"Process Path"':
            # process.csv layout (no per-function column).
            self.header_key_index = 2
            self.first_data_index = 3
            self.func_index = None
            self.process_index = 1
            self.module_index = 2
            if header_list[self.module_index] != '"Module Path"':
                raise Error('Did not find "Module Path" at index %d in %s'
                            % (self.module_index, header_list))
        else:
            raise Error('Unexpected header line format: %s' % header_list)
        self.global_sample_key_index = 1
        # The final CSV column is always empty, hence len - 2.
        self.last_data_index = len(header_list) - 2

    def HeaderKey(self, list):
        return list[self.header_key_index]

    def EntryKeyL1(self, list):
        if self.func_index is None:
            return list[self.process_index]
        # Concatenate function, module and process to build a unique key.
        return list[self.func_index] + list[self.module_index] + list[self.process_index]

    def EntryKeyL2(self, list):
        return None if self.func_index else list[self.module_index]

    def GSKey(self, list):
        return list[self.global_sample_key_index]
def CheckMultiplexFormat(list, start_col, stop_col):
    """Validate the shape of a "Multiplex" header row.

    The cell before the data columns must be empty or '"Multiplex"', every
    data ratio must be >= 1, and the cell after the last data column must
    be empty.  Raises Error otherwise.
    """
    if ((list[start_col-1] != '' and
         list[start_col-1] != '"Multiplex"') or
        list[start_col] < 1 or
        list[stop_col+1] != ''):
        # Bug fix: the format string was previously passed to Error alongside
        # its argument (Error('...%s', list)) and never interpolated.
        raise Error('Unexpected multiplex line format: %s' % list)
def BuildSpreadsheet(filename):
    """Parse a Gooda CSV file into a Spreadsheet.

    Header rows (up through the "Multiplex" row) go into header_dict /
    header_order; subsequent rows are data entries keyed at one or two
    levels depending on the spreadsheet flavour.
    """
    input_file = open(filename, 'r')
    found_header = False
    found_cycles = False
    prev_L1_key = None
    ss = Spreadsheet()
    for line in input_file:
        # Skip the bracket-only delimiter lines and blank lines.
        match = re.search(r'^\[$|^]$|^$', line)
        if match:
            continue
        # Tokenize one CSV-ish row: quoted cells, bare cells, or empty cells.
        list = re.findall(r'"[^, ][^"]*"|[^\[\]," \n]+|(?<=[\[,])(?=[\],])|(?<=[\[,] )(?=[\],])', line)
        if list[0] != '':
            raise Error("Expected first column to be empty: %s" % line)
        #print list
        if not found_header:
            # The very first row determines the column layout.
            ss.type_info = SpreadsheetTypeInfo(list)
            found_header = True
        if not found_cycles:
            # Still in the header section; it ends after the "Cycles" row.
            key = ss.HeaderKey(list)
            #print key
            if key == '"Cycles"':
                found_cycles = True
            if key == '"Multiplex"':
                CheckMultiplexFormat(list, ss.FirstCol(), ss.LastCol())
            ss.header_dict[key] = list
            ss.header_order.append(key)
            continue
        # Data section: special summary row, level-1 row, or level-2 row.
        if ss.GSKey(list) == '"Global sample breakdown"':
            ss.entriesL1_dict[ss.GSKey(list)] = list
        elif ss.EntryKeyL1(list):
            ss.entriesL1_dict[ss.EntryKeyL1(list)] = list
            prev_L1_key = ss.EntryKeyL1(list)
        else:
            # Level-2 rows nest under the most recent level-1 row.
            if not prev_L1_key:
                raise Error("Saw level 2 key before level 1 key: %s" % line)
            if prev_L1_key not in ss.entriesL2_dict:
                ss.entriesL2_dict[prev_L1_key] = {}
            ss.entriesL2_dict[prev_L1_key][ss.EntryKeyL2(list)] = list
            #print prev_L1_key, ":", ss.EntryKeyL2(list), ":", list
    input_file.close()
    return ss
def PrintCsvLine(list, last=False):
    """Print one row in Gooda's bracketed CSV form.

    Every row but the last gets a trailing comma.
    Bug fix: the Python-2-only `print x` statement is replaced with the
    parenthesized single-argument form, identical in Python 2 and valid
    in Python 3.
    """
    printline = '[' + ', '.join(list) + ']'
    if not last:
        printline = printline + ","
    print(printline)
def AddMissingEntries(dictA, dictB, dictA2, dictB2,
                      start_col, stop_col):
    """Ensure every key of dictA also exists in dictB.

    Missing rows are deep-copied from dictA with the data columns
    [start_col, stop_col] zeroed, giving later arithmetic a 0 baseline.
    dictA2/dictB2 are the matching level-2 nested dicts (pass None to skip).

    Bug fix: `dict.iteritems()` is Python-2-only (and the bound value was
    unused); plain key iteration behaves identically in Python 2 and 3.
    """
    for k in dictA:
        if k not in dictB:
            dictB[k] = copy.deepcopy(dictA[k])
            for i in range(start_col, stop_col + 1):
                dictB[k][i] = 0
        if dictA2 and k in dictA2:
            if k not in dictB2:
                dictB2[k] = {}
            AddMissingEntries(dictA2[k], dictB2[k], None, None, start_col, stop_col)
def ConvertAndScale(dict, dict2, scale_array,
                    start_col, stop_col):
    """In place, convert data columns to float and apply per-column scaling.

    scale_array maps column index -> multiplier; pass None for a uniform
    factor of 1.0.  dict2 is the matching level-2 nested dict (or None).

    Bug fix: `dict.iteritems()` is Python-2-only; plain key iteration
    behaves identically in Python 2 and 3.
    """
    scale = 1.0
    for k in dict:
        for i in range(start_col, stop_col + 1):
            if scale_array:
                scale = scale_array[i]
            dict[k][i] = scale*float(dict[k][i])
        if dict2 and k in dict2:
            ConvertAndScale(dict2[k], None, scale_array, start_col, stop_col)
def ComputeChange(dictA, dictB, dictA2, dictB2,
                  change_dict, change_dict2,
                  start_col, stop_col):
    """Fill change_dict with dictB's rows, adding dictA's data columns in.

    Each summed column value is truncated to int and stored as a string,
    ready for CSV printing.  change_dict2 receives the level-2 results.

    Bug fix: `dict.iteritems()` is Python-2-only; plain key iteration
    behaves identically in Python 2 and 3.
    """
    for k in dictA:
        change_dict[k] = copy.deepcopy(dictB[k])
        for i in range(start_col, stop_col + 1):
            change_dict[k][i] = str(int(change_dict[k][i] + dictA[k][i]))
        if dictA2 and k in dictA2:
            change_dict2[k] = {}
            ComputeChange(dictA2[k], dictB2[k], None, None,
                          change_dict2[k], None, start_col, stop_col)
def CompareDicts(ref_ss, new_ss):
    """Combine two parsed spreadsheets and print the summed CSV to stdout.

    new_ss's data columns are rescaled by the ratio of the two files'
    "Multiplex" rows, ref_ss's values are added in, and the merged result
    is printed in the same bracketed-CSV format (headers, level-1 rows
    sorted by descending magnitude of the first data column with their
    level-2 rows nested, then the "Global sample breakdown" summary last).
    """
    # All header rows except "Multiplex" must match between the two files.
    for (k, v) in ref_ss.header_dict.iteritems():
        if k != '"Multiplex"' and v != new_ss.header_dict[k]:
            raise Error('Header lines not equal: "%s" != "%s"' % (v, new_ss.header_dict[k]))
    start_col = ref_ss.FirstCol()
    stop_col = ref_ss.LastCol()
    # Per-column multiplex ratio used to normalize new_ss onto ref_ss's scale.
    ref_multiplex = ref_ss.header_dict['"Multiplex"']
    new_multiplex = new_ss.header_dict['"Multiplex"']
    multiplex_ratio = copy.deepcopy(new_multiplex)
    for i in range(start_col, stop_col + 1):
        multiplex_ratio[i] = float(multiplex_ratio[i])/float(ref_multiplex[i])
    new_multiplex = ref_multiplex
    #print multiplex_ratio
    # First add entries found in one to the other if not there
    AddMissingEntries(ref_ss.entriesL1_dict, new_ss.entriesL1_dict,
                      ref_ss.entriesL2_dict, new_ss.entriesL2_dict,
                      start_col, stop_col)
    AddMissingEntries(new_ss.entriesL1_dict, ref_ss.entriesL1_dict,
                      new_ss.entriesL2_dict, ref_ss.entriesL2_dict,
                      start_col, stop_col)
    # Convert string cells to floats; only new_ss is multiplex-rescaled.
    ConvertAndScale(ref_ss.entriesL1_dict, ref_ss.entriesL2_dict, None,
                    start_col, stop_col)
    ConvertAndScale(new_ss.entriesL1_dict, new_ss.entriesL2_dict, multiplex_ratio,
                    start_col, stop_col)
    change_dictL1 = {}
    change_dictL2 = {}
    ComputeChange(ref_ss.entriesL1_dict, new_ss.entriesL1_dict,
                  ref_ss.entriesL2_dict, new_ss.entriesL2_dict,
                  change_dictL1, change_dictL2,
                  start_col, stop_col)
    print '['
    for h in ref_ss.header_order:
        PrintCsvLine(ref_ss.header_dict[h])
    # The summary row is held back so it can be printed last without a comma.
    globalsample = change_dictL1['\"Global sample breakdown\"']
    del change_dictL1['\"Global sample breakdown\"']
    for key, value in sorted(change_dictL1.iteritems(), key=lambda (k,v): abs(int(v[start_col])), reverse=True):
        PrintCsvLine(value)
        if key in change_dictL2:
            for key2, value2 in sorted(change_dictL2[key].iteritems(), key=lambda (k,v): abs(int(v[start_col])), reverse=True):
                PrintCsvLine(value2)
    PrintCsvLine(globalsample, True)
    print ']'
def main():
    """Command-line entry point: merge two Gooda CSV files and print the sum.

    Bug fix: the Python-2-only `print 'x'` statement is replaced with the
    parenthesized single-argument form, identical in Python 2 and valid in
    Python 3.
    """
    if len(sys.argv) != 3:
        print('usage: gooda_diff.py file_ref file_new')
        sys.exit(1)
    filename_ref = sys.argv[1]
    filename_new = sys.argv[2]
    ref_ss = BuildSpreadsheet(filename_ref)
    new_ss = BuildSpreadsheet(filename_new)
    CompareDicts(ref_ss, new_ss)
    sys.exit(0)


if __name__ == '__main__':
    main()
| apache-2.0 |
kerneltask/micropython | tests/extmod/uasyncio_event.py | 16 | 2217 | # Test Event class
try:
import uasyncio as asyncio
except ImportError:
try:
import asyncio
except ImportError:
print("SKIP")
raise SystemExit
async def task(id, ev):
    # Wait on *ev*, printing markers before and after; the middle print
    # shows the value returned by Event.wait() (expected truthy).
    print("start", id)
    print(await ev.wait())
    print("end", id)
async def task_delay_set(t, ev):
    # Sleep for *t* seconds, then fire the event.
    await asyncio.sleep(t)
    print("set event")
    ev.set()
async def main():
    """Exercise asyncio.Event; output order is the test's expected golden text."""
    ev = asyncio.Event()

    # Set and clear without anything waiting, and test is_set()
    print(ev.is_set())
    ev.set()
    print(ev.is_set())
    ev.clear()
    print(ev.is_set())

    # Create 2 tasks waiting on the event
    print("----")
    asyncio.create_task(task(1, ev))
    asyncio.create_task(task(2, ev))
    print("yield")
    await asyncio.sleep(0)
    print("set event")
    ev.set()
    print("yield")
    await asyncio.sleep(0)

    # Create a task waiting on the already-set event
    print("----")
    asyncio.create_task(task(3, ev))
    print("yield")
    await asyncio.sleep(0)

    # Clear event, start a task, then set event again
    print("----")
    print("clear event")
    ev.clear()
    asyncio.create_task(task(4, ev))
    await asyncio.sleep(0)
    print("set event")
    ev.set()
    await asyncio.sleep(0)

    # Cancel a task waiting on an event (set event then cancel task)
    print("----")
    ev = asyncio.Event()
    t = asyncio.create_task(task(5, ev))
    await asyncio.sleep(0)
    ev.set()
    t.cancel()
    await asyncio.sleep(0.1)

    # Cancel a task waiting on an event (cancel task then set event)
    print("----")
    ev = asyncio.Event()
    t = asyncio.create_task(task(6, ev))
    await asyncio.sleep(0)
    t.cancel()
    ev.set()
    await asyncio.sleep(0.1)

    # Wait for an event that does get set in time
    print("----")
    ev.clear()
    asyncio.create_task(task_delay_set(0.01, ev))
    await asyncio.wait_for(ev.wait(), 0.1)
    await asyncio.sleep(0)

    # Wait for an event that doesn't get set in time
    print("----")
    ev.clear()
    asyncio.create_task(task_delay_set(0.1, ev))
    try:
        await asyncio.wait_for(ev.wait(), 0.01)
    except asyncio.TimeoutError:
        print("TimeoutError")
    # The setter task eventually fires, so this wait completes.
    await ev.wait()


asyncio.run(main())
| mit |
berezovskyi/nikola | nikola/data/themes/base/messages/messages_sv.py | 8 | 1564 | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
# Swedish UI strings for Nikola.  Fix: the previously empty entries fell back
# to the untranslated English text; they now carry Swedish translations.
MESSAGES = {
    "%d min remaining to read": "%d minuter kvar att läsa",
    "(active)": "(aktiv)",
    "Also available in:": "Även tillgänglig på:",
    "Archive": "Arkiv",
    "Authors": "Författare",
    "Categories": "Kategorier",
    "Comments": "Kommentarer",
    "LANGUAGE": "Svenska",
    "Languages:": "Språk:",
    "More posts about %s": "Fler inlägg om %s",
    "Newer posts": "Nya inlägg",
    "Next post": "Nästa inlägg",
    "No posts found.": "Inga inlägg hittade.",
    "Nothing found.": "Inget hittat.",
    "Older posts": "Äldre inlägg",
    "Original site": "Originalsida",
    "Posted:": "Publicerad:",
    "Posts about %s": "Inlägg om %s",
    "Posts by %s": "Inlägg av %s",
    "Posts for year %s": "Inlägg för år %s",
    "Posts for {month} {day}, {year}": "Inlägg för {month} {day}, {year}",
    "Posts for {month} {year}": "Inlägg för {month} {year}",
    "Previous post": "Föregående inlägg",
    "Publication date": "Publiceringsdatum",
    "RSS feed": "RSS-flöde",
    "Read in English": "Läs på svenska",
    "Read more": "Läs mer",
    "Skip to main content": "Hoppa till huvudinnehåll",
    "Source": "Källa",
    "Subcategories:": "Underkategorier:",
    "Tags and Categories": "Taggar och Kategorier",
    "Tags": "Taggar",
    "Uncategorized": "Okategoriserad",
    "Updates": "Uppdateringar",
    "Write your page here.": "Skriv din sida här.",
    "Write your post here.": "Skriv ditt inlägg här.",
    "old posts, page %d": "gamla inlägg, sida %d",
    "page %d": "sida %d",
}
| mit |
raycarnes/stock-logistics-workflow | __unported__/stock_move_backdating/__openerp__.py | 14 | 1703 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012+ BREMSKERL-REIBBELAGWERKE EMMERLING GmbH & Co. KG
# Author Marco Dieckhoff
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest for the "Stock Move Backdating" module.
{
    "name": "Stock Move Backdating",
    "version": "1.0",
    'author': ['Marco Dieckhoff, BREMSKERL', 'Agile Business Group'],
    "category": "Stock Logistics",
    'website': 'www.bremskerl.com',
    'license': 'AGPL-3',
    # Requires only the core warehouse module.
    "depends": ["stock"],
    "summary": "Allows back-dating of stock moves",
    "description": """This module allows to register old stock moves
(with date != now).
On stock moves, user can specify the "Actual Movement Date", that will be
used as movement date""",
    'data': [
        "view/stock_view.xml",
        "wizard/stock_partial_picking_view.xml",
    ],
    'demo': [],
    # Not installable: the module lives under __unported__ pending migration.
    'installable': False,
}
| agpl-3.0 |
supermarcos/log2bq | mapreduce/test_support.py | 25 | 4065 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to aid in testing mapreduces."""
import base64
import cgi
import os
import re
from mapreduce import main
from mapreduce import mock_webapp
from mapreduce import util
def decode_task_payload(task):
    """Decodes POST task payload.

    Args:
      task: a task to decode its payload.

    Returns:
      parameter_name -> parameter_value dict. If multiple parameter values are
      present, then parameter_value will be a list.
    """
    raw = task["body"]
    if not raw:
        return {}
    decoded = base64.b64decode(raw)
    params = {}
    for name, values in cgi.parse_qs(decoded).items():
        # Collapse single-valued parameters to a scalar.
        params[name] = values[0] if len(values) == 1 else values
    return util.HugeTask.decode_payload(params)
def execute_task(task, handlers_map=None):
  """Execute mapper's executor task.

  This will try to determine the correct mapper handler for the task, will set
  up all mock environment necessary for task execution, and execute the task
  itself.

  This function can be used for functional-style testing of functionality
  depending on mapper framework.

  Args:
    task: a taskqueue task dict as returned by the taskqueue stub, with at
      least "url" and "method" keys.
    handlers_map: optional list of (regexp, handler_class) pairs; defaults
      to the full mapreduce handler map.

  Raises:
    Exception: if no handler matches the task url, if the HTTP method is
      unsupported, or if the handler does not finish with status 200.
  """
  if not handlers_map:
    handlers_map = main.create_handlers_map()
  url = task["url"]
  handler = None
  for (re_str, handler_class) in handlers_map:
    # Anchor the pattern: match the whole path, optionally followed by a
    # query string.
    re_str = "^" + re_str + "($|\\?)"
    if re.match(re_str, url):
      handler = handler_class()
      break
  if not handler:
    raise Exception("Can't determine handler for %s" % task)
  handler.initialize(mock_webapp.MockRequest(),
                     mock_webapp.MockResponse())
  handler.request.set_url(url)
  # Mimic the environment App Engine sets up for a task invocation.
  handler.request.environ["HTTP_HOST"] = "myapp.appspot.com"
  for k, v in task.get("headers", []):
    handler.request.headers[k] = v
    environ_key = "HTTP_" + k.replace("-", "_").upper()
    handler.request.environ[environ_key] = v
  handler.request.environ["HTTP_X_APPENGINE_TASKNAME"] = (
      task.get("name", "default_task_name"))
  handler.request.environ["HTTP_X_APPENGINE_QUEUENAME"] = (
      task.get("queue_name", "default"))
  handler.request.environ["PATH_INFO"] = handler.request.path
  # Temporarily expose the request environ through os.environ; the original
  # environment is always restored in the finally block.
  saved_os_environ = os.environ
  try:
    os.environ = dict(os.environ)
    os.environ.update(handler.request.environ)
    if task["method"] == "POST":
      for k, v in decode_task_payload(task).items():
        handler.request.set(k, v)
      handler.post()
    elif task["method"] == "GET":
      handler.get()
    else:
      # Bug fix: task is a dict, so the method must be read with
      # task["method"]; the previous task.method raised AttributeError
      # instead of reporting the unsupported method.
      raise Exception("Unsupported method: %s" % task["method"])
  finally:
    os.environ = saved_os_environ
  if handler.response.status != 200:
    raise Exception("Handler failure: %s (%s). \nTask: %s\nHandler: %s" %
                    (handler.response.status,
                     handler.response.status_message,
                     task,
                     handler))
def execute_all_tasks(taskqueue, queue="default", handlers_map=None):
  """Run and remove every task currently in the given queue.

  Args:
    taskqueue: An instance of taskqueue stub.
    queue: Queue name to run all tasks from.
    handlers_map: optional list of (regexp, handler_class) pairs passed
      through to execute_task.
  """
  pending = taskqueue.GetTasks(queue)
  # Remove the tasks first so any tasks they enqueue are not mixed in.
  taskqueue.FlushQueue(queue)
  for pending_task in pending:
    execute_task(pending_task, handlers_map=handlers_map)
def execute_until_empty(taskqueue, queue="default", handlers_map=None):
  """Execute taskqueue tasks until it becomes empty.

  Args:
    taskqueue: An instance of taskqueue stub.
    queue: Queue name to run all tasks from.
    handlers_map: optional list of (regexp, handler_class) pairs passed
      through to execute_all_tasks.
  """
  while True:
    if not taskqueue.GetTasks(queue):
      break
    execute_all_tasks(taskqueue, queue, handlers_map)
| apache-2.0 |
zhanghui9700/eonboard | eoncloud_web/biz/account/views.py | 1 | 16684 | #-*-coding-utf-8-*-
import logging
from datetime import datetime
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import check_password
from biz.account.settings import QUOTA_ITEM, NotificationLevel
from biz.account.models import (Contract, Operation, Quota,
UserProxy, Notification, Feed, UserProfile)
from biz.account.serializer import (ContractSerializer, OperationSerializer,
UserSerializer, QuotaSerializer,
FeedSerializer, DetailedUserSerializer,
NotificationSerializer)
from biz.account.utils import get_quota_usage
from biz.idc.models import DataCenter
from biz.common.pagination import PagePagination
from biz.common.decorators import require_POST, require_GET
from biz.common.utils import retrieve_params
from cloud.tasks import (link_user_to_dc_task, send_notifications,
send_notifications_by_data_center)
from frontend.forms import CloudUserCreateFormWithoutCapatcha
LOG = logging.getLogger(__name__)
@api_view(["GET"])
def contract_view(request):
    """Return the first contract of the current user in the session's UDC."""
    c = Contract.objects.filter(user=request.user,
                                udc__id=request.session["UDC_ID"])[0]
    s = ContractSerializer(c)
    return Response(s.data)
@api_view(["GET"])
def quota_view(request):
    """Return quota usage for the current user in the session's UDC."""
    quota = get_quota_usage(request.user, request.session["UDC_ID"])
    return Response(quota)
class OperationList(generics.ListAPIView):
    """Paginated list of operation log entries, filterable by query params."""
    queryset = Operation.objects
    serializer_class = OperationSerializer
    pagination_class = PagePagination
    def get_queryset(self):
        """Apply resource/name/date filters; superusers may also filter by
        data center and operator, regular users only see their own UDC."""
        request = self.request
        resource = request.query_params.get('resource')
        resource_name = request.query_params.get('resource_name')
        start_date = request.query_params.get('start_date')
        end_date = request.query_params.get('end_date')
        queryset = super(OperationList, self).get_queryset()
        if resource:
            queryset = queryset.filter(resource=resource)
        if resource_name:
            queryset = queryset.filter(resource_name__istartswith=resource_name)
        if start_date:
            queryset = queryset.filter(create_date__gte=start_date)
        if end_date:
            queryset = queryset.filter(create_date__lte=end_date)
        if request.user.is_superuser:
            data_center_pk = request.query_params.get('data_center', '')
            operator_pk = request.query_params.get('operator', '')
            if data_center_pk:
                queryset = queryset.filter(udc__data_center__pk=data_center_pk)
            if operator_pk:
                queryset = queryset.filter(user__pk=operator_pk)
        else:
            # Non-superusers are restricted to their own operations in the
            # UDC bound to the current session.
            queryset = queryset.filter(user=request.user,
                                       udc__id=request.session["UDC_ID"])
        return queryset.order_by('-create_date')
@api_view()
def operation_filters(request):
    """Return the choice lists (resources, operators, data centers) used to
    filter the operation log."""
    resources = Operation.objects.values('resource').distinct()
    for data in resources:
        # Attach a translated display name for each distinct resource.
        data['name'] = _(data['resource'])
    return Response({
        "resources": resources,
        "operators": UserProxy.normal_users.values('pk', 'username'),
        "data_centers": DataCenter.objects.values('pk', 'name')
    })
class ContractList(generics.ListCreateAPIView):
    """List all non-deleted contracts (unpaginated)."""
    queryset = Contract.living.filter(deleted=False)
    serializer_class = ContractSerializer
    def list(self, request, *args, **kwargs):
        serializer = ContractSerializer(self.get_queryset(), many=True)
        return Response(serializer.data)
class ContractDetail(generics.RetrieveAPIView):
    """Retrieve a single contract by primary key."""
    queryset = Contract.living.all()
    serializer_class = ContractSerializer
@api_view(['POST'])
def create_contract(request):
    """Create a contract from the POSTed data and log the operation."""
    try:
        serializer = ContractSerializer(data=request.data,
                                        context={"request": request})
        if serializer.is_valid():
            contract = serializer.save()
            Operation.log(contract, contract.name, 'create', udc=contract.udc,
                          user=request.user)
            return Response({'success': True,
                             "msg": _('Contract is created successfully!')},
                            status=status.HTTP_201_CREATED)
        else:
            return Response({"success": False,
                             "msg": _('Contract data is not valid!'),
                             'errors': serializer.errors},
                            status=status.HTTP_400_BAD_REQUEST)
    except Exception as e:
        LOG.error("Failed to create contract, msg:[%s]" % e)
        return Response({"success": False, "msg": _(
            'Failed to create contract for unknown reason.')})
@api_view(['POST'])
def update_contract(request):
    """Update name/customer/date range of the contract given by 'id'."""
    try:
        pk = request.data['id']
        contract = Contract.objects.get(pk=pk)
        contract.name = request.data['name']
        contract.customer = request.data['customer']
        # Dates arrive as 'YYYY-mm-dd HH:MM:SS' strings.
        contract.start_date = datetime.strptime(request.data['start_date'],
                                                '%Y-%m-%d %H:%M:%S')
        contract.end_date = datetime.strptime(request.data['end_date'],
                                              '%Y-%m-%d %H:%M:%S')
        contract.save()
        Operation.log(contract, contract.name, 'update', udc=contract.udc,
                      user=request.user)
        return Response(
            {'success': True, "msg": _('Contract is updated successfully!')},
            status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to update contract, msg:[%s]" % e)
        return Response({"success": False, "msg": _(
            'Failed to update contract for unknown reason.')})
@api_view(['POST'])
def delete_contracts(request):
    """Soft-delete the contracts in 'contract_ids[]' and their quotas."""
    try:
        contract_ids = request.data.getlist('contract_ids[]')
        for contract_id in contract_ids:
            contract = Contract.objects.get(pk=contract_id)
            contract.deleted = True
            contract.save()
            # Cascade the soft delete to the contract's quotas.
            Quota.living.filter(contract__pk=contract_id).update(deleted=True,
                                update_date=timezone.now())
            Operation.log(contract, contract.name, 'delete', udc=contract.udc,
                          user=request.user)
        return Response(
            {'success': True, "msg": _('Contracts have been deleted!')},
            status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to delete contracts, msg:[%s]" % e)
        return Response({"success": False, "msg": _(
            'Failed to delete contracts for unknown reason.')})
class UserList(generics.ListAPIView):
    """List all normal (non-system) users, unpaginated."""
    queryset = UserProxy.normal_users
    serializer_class = UserSerializer
    def list(self, request, *args, **kwargs):
        serializer = self.serializer_class(self.get_queryset(), many=True)
        return Response(serializer.data)
@require_GET
def active_users(request):
    """List only the active normal users."""
    queryset = UserProxy.normal_users.filter(is_active=True)
    serializer = UserSerializer(queryset.all(), many=True)
    return Response(serializer.data)
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update a user; DELETE deactivates instead of removing."""
    queryset = UserProxy.normal_users.all()
    serializer_class = DetailedUserSerializer
    def perform_destroy(self, instance):
        # Soft delete: keep the record, just mark the account inactive.
        instance.is_active = False
        instance.save()
@api_view(['POST'])
def deactivate_user(request):
    """Mark the user given by 'id' as inactive."""
    pk = request.data['id']
    user = User.objects.get(pk=pk)
    user.is_active = False
    user.save()
    return Response({"success": True, "msg": _('User has been deactivated!')},
                    status=status.HTTP_200_OK)
@api_view(['POST'])
def activate_user(request):
    """Mark the user given by 'id' as active."""
    pk = request.data['id']
    user = User.objects.get(pk=pk)
    user.is_active = True
    user.save()
    return Response({"success": True, "msg": _('User has been activated!')},
                    status=status.HTTP_200_OK)
@api_view(["POST"])
def change_password(request):
    """Change the current user's password after verifying the old one and
    the confirmation field."""
    user = request.user
    old_password = request.data['old_password']
    new_password = request.data['new_password']
    confirm_password = request.data['confirm_password']
    if new_password != confirm_password:
        return Response({"success": False, "msg": _(
            "The new password doesn't match confirm password!")})
    if not check_password(old_password, user.password):
        return Response({"success": False,
                         "msg": _("The original password is not correct!")})
    user.set_password(new_password)
    user.save()
    return Response({"success": True, "msg": _(
        "Password has been changed! Please login in again.")})
class QuotaList(generics.ListAPIView):
    """List living quotas, optionally restricted to one contract."""
    queryset = Quota.living
    serializer_class = QuotaSerializer
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        if 'contract_id' in request.query_params:
            queryset = queryset.filter(
                contract__id=request.query_params['contract_id'])
        return Response(self.serializer_class(queryset, many=True).data)
class QuotaDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single living quota."""
    queryset = Quota.living
    serializer_class = QuotaSerializer
@api_view(['GET'])
def resource_options(request):
    """Return the static list of quota-able resource items."""
    return Response(QUOTA_ITEM)
@api_view(['POST'])
def create_quotas(request):
    """Bulk create/update quotas for one contract.

    'ids[]', 'resources[]' and 'limits[]' are parallel lists; an empty or
    unknown id creates a new quota, otherwise the existing one is updated.
    """
    try:
        contract = Contract.objects.get(pk=request.data['contract_id'])
        quota_ids = request.data.getlist('ids[]')
        resources = request.data.getlist('resources[]')
        limits = request.data.getlist('limits[]')
        for index, quota_id in enumerate(quota_ids):
            resource, limit = resources[index], limits[index]
            if quota_id and Quota.living.filter(contract=contract,
                                                pk=quota_id).exists():
                Quota.objects.filter(pk=quota_id).update(resource=resource,
                                                         limit=limit,
                                                         update_date=timezone.now())
            else:
                Quota.objects.create(resource=resource, limit=limit,
                                     contract=contract)
        Operation.log(contract, contract.name + " quota", 'update',
                      udc=contract.udc, user=request.user)
        return Response({'success': True,
                         "msg": _('Quotas have been saved successfully!')},
                        status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to save quotas, msg:[%s]" % e)
        return Response({"success": False,
                         "msg": _('Failed to save quotas for unknown reason.')})
@api_view(['POST'])
def create_quota(request):
    """Create a single quota, or update its limit when 'id' is given."""
    try:
        contract = Contract.objects.get(pk=request.data['contract'])
        resource, limit = request.data['resource'], request.data['limit']
        pk = request.data['id'] if 'id' in request.data else None
        if pk and Quota.objects.filter(pk=pk).exists():
            # Existing quota: only the limit is updated here.
            quota = Quota.objects.get(pk=pk)
            quota.limit = limit
            quota.save()
        else:
            quota = Quota.objects.create(resource=resource,
                                         limit=limit,
                                         contract=contract)
        return Response({'success': True,
                         "msg": _('Quota have been saved successfully!'),
                         "quota": QuotaSerializer(quota).data},
                        status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to save quota, msg:[%s]" % e)
        return Response({"success": False,
                         "msg": _('Failed to save quota for unknown reason.')})
@api_view(['POST'])
def delete_quota(request):
    """Soft-delete the quota given by 'id' (sets its deleted flag)."""
    try:
        Quota.living.filter(pk=request.data['id']).update(deleted=True)
        return Response({'success': True,
                         "msg": _('Quota have been deleted successfully!')},
                        status=status.HTTP_201_CREATED)
    except Exception as e:
        # Bug fix: the log line and error message previously said
        # "create quota" on this delete path.
        LOG.error("Failed to delete quota, msg:[%s]" % e)
        return Response(
            {"success": False,
             "msg": _('Failed to delete quota for unknown reason.')}
        )
@api_view(["GET"])
def get_config_view(request):
    """Expose the site-wide configuration dictionary."""
    return Response(settings.SITE_CONFIG)
@require_GET
def notification_options(request):
    """Return the available notification severity levels."""
    return Response(NotificationLevel.OPTIONS)
@require_POST
def broadcast(request):
    """Asynchronously send a notification to the selected receivers."""
    receiver_ids = request.data.getlist('receiver_ids[]')
    level, title, content = retrieve_params(request.data,
                                            'level', 'title', 'content')
    send_notifications.delay(title, content, level, receiver_ids)
    return Response({"success": True,
                     "msg": _('Notification is sent successfully!')})
@require_POST
def data_center_broadcast(request):
    """Asynchronously send a notification to all users of the selected
    data centers."""
    level, title, content = retrieve_params(
        request.data, 'level', 'title', 'content')
    dc_ids = request.data.getlist('data_centers[]')
    send_notifications_by_data_center.delay(title, content, level, dc_ids)
    return Response({"success": True,
                     "msg": _('Notification is sent successfully!')})
@require_POST
def announce(request):
    """Create a system-wide announcement notification."""
    level, title, content = retrieve_params(request.data, 'level', 'title',
                                            'content')
    Notification.objects.create(title=title, content=content,
                                level=level, is_announcement=True)
    return Response({"success": True,
                     "msg": _('Announcement is sent successfully!')})
class NotificationList(generics.ListAPIView):
    """List manually-created notifications, newest first."""
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer
    def list(self, request, *args, **kwargs):
        # Automatic notifications are excluded from this listing.
        queryset = self.get_queryset().filter(is_auto=False).order_by(
            '-create_date')
        return Response(self.serializer_class(queryset, many=True).data)
class NotificationDetail(generics.RetrieveDestroyAPIView):
    """Retrieve or delete a single notification."""
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer
class FeedList(generics.ListAPIView):
    """List the current user's feed items, newest first."""
    queryset = Feed.living.all()
    serializer_class = FeedSerializer
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset().filter(receiver=request.user).order_by(
            '-create_date')
        return Response(self.serializer_class(queryset, many=True).data)
class FeedDetail(generics.RetrieveDestroyAPIView):
    """Retrieve a feed item; DELETE performs a soft (fake) delete."""
    queryset = Feed.living.all()
    serializer_class = FeedSerializer
    def perform_destroy(self, instance):
        instance.fake_delete()
@require_GET
def feed_status(request):
    """Pull pending announcements into the user's feed and return the
    number of unread feed items."""
    Notification.pull_announcements(request.user)
    num = Feed.living.filter(receiver=request.user, is_read=False).count()
    return Response({"num": num})
@require_POST
def mark_read(request, pk):
    """Mark a single feed item as read."""
    Feed.living.get(pk=pk).mark_read()
    return Response(status=status.HTTP_200_OK)
@require_POST
def initialize_user(request):
    """Link the given user to the default data center (synchronously)."""
    user_id = request.data['user_id']
    user = User.objects.get(pk=user_id)
    link_user_to_dc_task(user, DataCenter.get_default())
    return Response({"success": True,
                     "msg": _("Initialization is successful.")})
@require_POST
def create_user(request):
    """Create a user via the captcha-free signup form and asynchronously
    link the account to the default data center."""
    user = User()
    form = CloudUserCreateFormWithoutCapatcha(data=request.POST, instance=user)
    if not form.is_valid():
        return Response({
            "success": False,
            "msg": _("Data is not valid")
        })
    form.save()
    link_user_to_dc_task.delay(user, DataCenter.get_default())
    return Response({"success": True,
                     "msg": _("User is created successfully!")})
@require_GET
def is_username_unique(request):
    """Return True when no user exists with the given username."""
    username = request.GET['username']
    return Response(not UserProxy.objects.filter(username=username).exists())
@require_GET
def is_email_unique(request):
    """Return True when no user exists with the given email address."""
    email = request.GET['email']
    return Response(not UserProxy.objects.filter(email=email).exists())
@require_GET
def is_mobile_unique(request):
    """Return True when no profile exists with the given mobile number."""
    mobile = request.GET['mobile']
    return Response(not UserProfile.objects.filter(mobile=mobile).exists())
| apache-2.0 |
martindisch/Arduino | Watering/logger.py | 1 | 1285 | import os, serial, time, numpy
# Seconds to accumulate samples before averaging and writing to the log file.
watchTime = 3600
# Seconds between two sensor readings.
measureInterval = 5
# Estimated per-iteration processing overhead, subtracted from the sleep.
calcTime = 1.5
def receiving(ser):
    """Block until a complete line arrives on *ser* and return it.

    Keeps draining whatever bytes are waiting on the serial port; as soon
    as the accumulated buffer contains a newline, the most recently
    completed line is stored in the module-level ``last_received`` and
    returned.
    """
    global last_received
    pending = ''
    while '\n' not in pending:
        pending = pending + ser.read(ser.inWaiting())
    # Guaranteed to have at least 2 entries once '\n' is present.
    complete = pending.split('\n')
    last_received = complete[-2]
    return last_received
def dateTime():
    """Return the current local time formatted as YYYY/MM/DD HH:MM:SS."""
    stamp_format = "%Y/%m/%d %H:%M:%S"
    return time.strftime(stamp_format)
# Main sampling loop: poll the Arduino every measureInterval seconds and
# append the hourly average to ~/waterlog.txt.  (Python 2 script.)
ser = serial.Serial('/dev/ttyACM0', 9600)
# Don't write immediately, the Arduino is restarting
time.sleep(3)
timePassed = 0
values = ()
startDate = "none"
while 1:
    if timePassed >= watchTime:
        # A full measurement window has elapsed: log the mean and reset.
        f = open(os.path.expanduser('~') + "/waterlog.txt", 'a')
        f.write(startDate + " - " + dateTime() + " " + str(numpy.mean(values)) + "\n")
        f.close()
        print dateTime() + " Wrote to file successfully"
        timePassed = 0
        values = ()
        startDate = "none"
    if "none" in startDate:
        # Remember when the current measurement window started.
        startDate = dateTime()
    # '4' triggers a reading on the Arduino side; the reply's last four
    # characters carry the numeric value (assumption -- see Arduino sketch).
    ser.write('4')
    message = receiving(ser)
    value = int(message[-4:])
    values += (value,)
    timePassed += measureInterval
    time.sleep(measureInterval - calcTime)
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/Scanner/Dir.py | 21 | 3810 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
    """Return only the nodes that disambiguate to directory nodes."""
    return [node for node in nodes
            if isinstance(node.disambiguate(), SCons.Node.FS.Dir)]
def DirScanner(**kw):
    """Return a prototype Scanner instance for scanning
    directories for on-disk files"""
    # Recurse only into directory nodes; files are leaves.
    kw['node_factory'] = SCons.Node.FS.Entry
    kw['recursive'] = only_dirs
    return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
    """Return a prototype Scanner instance for "scanning"
    directory Nodes for their in-memory entries"""
    kw['node_factory'] = SCons.Node.FS.Entry
    # Non-recursive: only the directory's own entries are reported.
    kw['recursive'] = None
    return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
# Names that directory scanning must never report: the '.'/'..' entries and
# the various .sconsign database files.  Each name is registered both as-is
# and in normalized case so lookups work on case-insensitive filesystems.
skip_entry = {}
skip_entry_list = [
   '.',
   '..',
   '.sconsign',
   # Used by the native dblite.py module.
   '.sconsign.dblite',
   # Used by dbm and dumbdbm.
   '.sconsign.dir',
   # Used by dbm.
   '.sconsign.pag',
   # Used by dumbdbm.
   '.sconsign.dat',
   '.sconsign.bak',
   # Used by some dbm emulations using Berkeley DB.
   '.sconsign.db',
]
for skip in skip_entry_list:
    skip_entry[skip] = 1
    skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
# Predicate used by the scan functions to filter out the names above.
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
    """
    Scans a directory for on-disk files and directories therein.

    Looking up the entries will add these to the in-memory Node tree
    representation of the file system, so all we have to do is just
    that and then call the in-memory scanning function.
    """
    try:
        flist = node.fs.listdir(node.abspath)
    except (IOError, OSError):
        # Unreadable or missing directory: report no entries.
        return []
    e = node.Entry
    for f in filter(do_not_scan, flist):
        # Add ./ to the beginning of the file name so if it begins with a
        # '#' we don't look it up relative to the top-level directory.
        e('./' + f)
    return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
    """Return a Node.FS.Dir's in-memory entry nodes, sorted by name."""
    try:
        entries = node.entries
    except AttributeError:
        # It's not a Node.FS.Dir (or doesn't look enough like one for
        # our purposes), which can happen if a target list containing
        # mixed Node types (Dirs and Files, for example) has a Dir as
        # the first entry.
        return []
    names = sorted(name for name in entries.keys() if do_not_scan(name))
    return [entries[name] for name in names]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
NL66278/odoo | addons/hr_timesheet_sheet/report/__init__.py | 342 | 1074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Meriipu/quodlibet | gdist/search_provider.py | 4 | 1904 | # Copyright 2013-2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from .util import Command
class install_search_provider(Command):
    """distutils command that installs the GNOME Shell search provider
    definition file under share/gnome-shell/search-providers."""
    user_options = []
    def initialize_options(self):
        self.install_dir = None
        self.search_provider = None
        # Files written by run(), reported through get_outputs().
        self.outfiles = []
    def finalize_options(self):
        # Derive install_dir from the main 'install' command's data dir.
        self.set_undefined_options('install',
                                   ('install_data', 'install_dir'))
        self.search_provider = self.distribution.search_provider
    def get_outputs(self):
        return self.outfiles
    def run(self):
        basepath = os.path.join(
            self.install_dir, 'share', 'gnome-shell', 'search-providers')
        out = self.mkpath(basepath)
        # mkpath may return None; record any created directories.
        self.outfiles.extend(out or [])
        (out, _) = self.copy_file(self.search_provider, basepath)
        self.outfiles.append(out)
| gpl-2.0 |
vjmac15/Lyilis | lib/youtube_dl/extractor/voxmedia (VJ Washington's conflicted copy 2017-08-29).py | 42 | 6525 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class VoxMediaIE(InfoExtractor):
    """Extractor for Vox Media network sites (The Verge, Vox, SB Nation,
    Eater, Polygon, Curbed, Racked).

    Videos are embedded through several mechanisms (Chorus video context,
    data-ooyala-id attributes, Volume embeds, YouTube); each mechanism is
    probed in turn and every hit becomes a url_transparent entry delegated
    to the Ooyala or Youtube extractor.
    """
    _VALID_URL = r'https?://(?:www\.)?(?:theverge|vox|sbnation|eater|polygon|curbed|racked)\.com/(?:[^/]+/)*(?P<id>[^/?]+)'
    _TESTS = [{
        'url': 'http://www.theverge.com/2014/6/27/5849272/material-world-how-google-discovered-what-software-is-made-of',
        'info_dict': {
            'id': '11eXZobjrG8DCSTgrNjVinU-YmmdYjhe',
            'ext': 'mp4',
            'title': 'Google\'s new material design direction',
            'description': 'md5:2f44f74c4d14a1f800ea73e1c6832ad2',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['Ooyala'],
    }, {
        # data-ooyala-id
        'url': 'http://www.theverge.com/2014/10/21/7025853/google-nexus-6-hands-on-photos-video-android-phablet',
        'md5': 'd744484ff127884cd2ba09e3fa604e4b',
        'info_dict': {
            'id': 'RkZXU4cTphOCPDMZg5oEounJyoFI0g-B',
            'ext': 'mp4',
            'title': 'The Nexus 6: hands-on with Google\'s phablet',
            'description': 'md5:87a51fe95ff8cea8b5bdb9ac7ae6a6af',
        },
        'add_ie': ['Ooyala'],
    }, {
        # volume embed
        'url': 'http://www.vox.com/2016/3/31/11336640/mississippi-lgbt-religious-freedom-bill',
        'info_dict': {
            'id': 'wydzk3dDpmRz7PQoXRsTIX6XTkPjYL0b',
            'ext': 'mp4',
            'title': 'The new frontier of LGBTQ civil rights, explained',
            'description': 'md5:0dc58e94a465cbe91d02950f770eb93f',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['Ooyala'],
    }, {
        # youtube embed
        'url': 'http://www.vox.com/2016/3/24/11291692/robot-dance',
        'md5': '83b3080489fb103941e549352d3e0977',
        'info_dict': {
            'id': 'FcNHTJU1ufM',
            'ext': 'mp4',
            'title': 'How "the robot" became the greatest novelty dance of all time',
            'description': 'md5:b081c0d588b8b2085870cda55e6da176',
            'upload_date': '20160324',
            'uploader_id': 'voxdotcom',
            'uploader': 'Vox',
        },
        'add_ie': ['Youtube'],
    }, {
        # SBN.VideoLinkset.entryGroup multiple ooyala embeds
        'url': 'http://www.sbnation.com/college-football-recruiting/2015/2/3/7970291/national-signing-day-rationalizations-itll-be-ok-itll-be-ok',
        'info_dict': {
            'id': 'national-signing-day-rationalizations-itll-be-ok-itll-be-ok',
            'title': '25 lies you will tell yourself on National Signing Day',
            'description': 'It\'s the most self-delusional time of the year, and everyone\'s gonna tell the same lies together!',
        },
        'playlist': [{
            'md5': '721fededf2ab74ae4176c8c8cbfe092e',
            'info_dict': {
                'id': 'p3cThlMjE61VDi_SD9JlIteSNPWVDBB9',
                'ext': 'mp4',
                'title': 'Buddy Hield vs Steph Curry (and the world)',
                'description': 'Let’s dissect only the most important Final Four storylines.',
            },
        }, {
            'md5': 'bf0c5cc115636af028be1bab79217ea9',
            'info_dict': {
                'id': 'BmbmVjMjE6esPHxdALGubTrouQ0jYLHj',
                'ext': 'mp4',
                'title': 'Chasing Cinderella 2016: Syracuse basketball',
                'description': 'md5:e02d56b026d51aa32c010676765a690d',
            },
        }],
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        # Pages are percent-encoded in places; unquote before regex scans.
        webpage = compat_urllib_parse_unquote(self._download_webpage(url, display_id))
        def create_entry(provider_video_id, provider_video_type, title=None, description=None):
            # YouTube ids are usable urls directly; other providers use the
            # "<provider>:<id>" url scheme handled by their extractors.
            return {
                '_type': 'url_transparent',
                'url': provider_video_id if provider_video_type == 'youtube' else '%s:%s' % (provider_video_type, provider_video_id),
                'title': title or self._og_search_title(webpage),
                'description': description or self._og_search_description(webpage),
            }
        entries = []
        # Mechanism 1: JSON video data embedded via Chorus / entry variable /
        # SBN entryGroup (may describe one video or a list).
        entries_data = self._search_regex([
            r'Chorus\.VideoContext\.addVideo\((\[{.+}\])\);',
            r'var\s+entry\s*=\s*({.+});',
            r'SBN\.VideoLinkset\.entryGroup\(\s*(\[.+\])',
        ], webpage, 'video data', default=None)
        if entries_data:
            entries_data = self._parse_json(entries_data, display_id)
            if isinstance(entries_data, dict):
                entries_data = [entries_data]
            for video_data in entries_data:
                provider_video_id = video_data.get('provider_video_id')
                provider_video_type = video_data.get('provider_video_type')
                if provider_video_id and provider_video_type:
                    entries.append(create_entry(
                        provider_video_id, provider_video_type,
                        video_data.get('title'), video_data.get('description')))
        # Mechanism 2: plain data-ooyala-id attribute.
        provider_video_id = self._search_regex(
            r'data-ooyala-id="([^"]+)"', webpage, 'ooyala id', default=None)
        if provider_video_id:
            entries.append(create_entry(provider_video_id, 'ooyala'))
        # Mechanism 3: Volume embed page referencing ooyala or youtube ids.
        volume_uuid = self._search_regex(
            r'data-volume-uuid="([^"]+)"', webpage, 'volume uuid', default=None)
        if volume_uuid:
            volume_webpage = self._download_webpage(
                'http://volume.vox-cdn.com/embed/%s' % volume_uuid, volume_uuid)
            video_data = self._parse_json(self._search_regex(
                r'Volume\.createVideo\(({.+})\s*,\s*{.*}\s*,\s*\[.*\]\s*,\s*{.*}\);', volume_webpage, 'video data'), volume_uuid)
            for provider_video_type in ('ooyala', 'youtube'):
                provider_video_id = video_data.get('%s_id' % provider_video_type)
                if provider_video_id:
                    description = video_data.get('description_long') or video_data.get('description_short')
                    entries.append(create_entry(
                        provider_video_id, provider_video_type, video_data.get('title_short'), description))
                    break
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries, display_id, self._og_search_title(webpage), self._og_search_description(webpage))
| gpl-3.0 |
kjw0106/boto | tests/db/test_sequence.py | 136 | 4065 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class TestDBHandler(object):
"""Test the DBHandler"""
def setup_class(cls):
"""Setup this class"""
cls.sequences = []
def teardown_class(cls):
"""Remove our sequences"""
for s in cls.sequences:
try:
s.delete()
except:
pass
def test_sequence_generator_no_rollover(self):
"""Test the sequence generator without rollover"""
from boto.sdb.db.sequence import SequenceGenerator
gen = SequenceGenerator("ABC")
assert(gen("") == "A")
assert(gen("A") == "B")
assert(gen("B") == "C")
assert(gen("C") == "AA")
assert(gen("AC") == "BA")
def test_sequence_generator_with_rollover(self):
"""Test the sequence generator with rollover"""
from boto.sdb.db.sequence import SequenceGenerator
gen = SequenceGenerator("ABC", rollover=True)
assert(gen("") == "A")
assert(gen("A") == "B")
assert(gen("B") == "C")
assert(gen("C") == "A")
def test_sequence_simple_int(self):
"""Test a simple counter sequence"""
from boto.sdb.db.sequence import Sequence
s = Sequence()
self.sequences.append(s)
assert(s.val == 0)
assert(s.next() == 1)
assert(s.next() == 2)
s2 = Sequence(s.id)
assert(s2.val == 2)
assert(s.next() == 3)
assert(s.val == 3)
assert(s2.val == 3)
def test_sequence_simple_string(self):
from boto.sdb.db.sequence import Sequence, increment_string
s = Sequence(fnc=increment_string)
self.sequences.append(s)
assert(s.val == "A")
assert(s.next() == "B")
def test_fib(self):
"""Test the fibonacci sequence generator"""
from boto.sdb.db.sequence import fib
# Just check the first few numbers in the sequence
lv = 0
for v in [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]:
assert(fib(v, lv) == lv+v)
lv = fib(v, lv)
def test_sequence_fib(self):
"""Test the fibonacci sequence"""
from boto.sdb.db.sequence import Sequence, fib
s = Sequence(fnc=fib)
s2 = Sequence(s.id)
self.sequences.append(s)
assert(s.val == 1)
# Just check the first few numbers in the sequence
for v in [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]:
assert(s.next() == v)
assert(s.val == v)
assert(s2.val == v) # it shouldn't matter which reference we use since it's garunteed to be consistent
def test_sequence_string(self):
    """Test the String incrementation sequence.

    Also checks direct assignment to ``val`` and that incrementing past
    the last letter ("Z") rolls over to a two-character value ("AA").
    """
    from boto.sdb.db.sequence import Sequence, increment_string
    s = Sequence(fnc=increment_string)
    self.sequences.append(s)  # register for teardown cleanup
    assert(s.val == "A")
    assert(s.next() == "B")
    s.val = "Z"  # value can be set directly, not just incremented
    assert(s.val == "Z")
    assert(s.next() == "AA")  # rollover past "Z" grows a digit
| mit |
Intel-bigdata/SSM | supports/integration-test/test_s3.py | 2 | 1750 | import argparse
import unittest
from util import *
# To run this script, HDFS should be installed locally and configured for S3 support.
# Please refer to s3-suppport.md in SSM repo's doc directory.
DEST_DIR = "s3a://xxxctest"
class TestS3(unittest.TestCase):
    """Integration test: copy files from HDFS to S3 through SSM actions."""
    # copy to S3
    def test_s3(self):
        """Create MAX_NUMBER random files, submit one SSM copy action per
        file targeting DEST_DIR, then remove the uploaded copies."""
        file_paths = []
        cids = []
        # create random directory
        source_dir = TEST_DIR + random_string() + "/"
        for i in range(MAX_NUMBER):
            file_path, cid = create_random_file_parallel(FILE_SIZE, source_dir)
            file_paths.append(file_path)
            cids.append(cid)
        failed_cids = wait_for_cmdlets(cids)
        self.assertTrue(len(failed_cids) == 0)
        # wait for DB sync
        time.sleep(5)
        # submit actions
        cids = []
        for i in range(MAX_NUMBER):
            cids.append(copy_file_to_s3(file_paths[i],
                                        DEST_DIR + file_paths[i]))
        failed_cids = wait_for_cmdlets(cids)
        self.assertTrue(len(failed_cids) == 0)
        # delete files from S3
        print "delete test file from S3"
        subprocess.call("hadoop fs -rm -r " + DEST_DIR +
                        TEST_DIR, shell=True)
if __name__ == '__main__':
    # Parse the test-tuning flags (-size, -num), then hand the remaining
    # arguments on to unittest's own command-line handling.
    parser = argparse.ArgumentParser()
    parser.add_argument('-size', default='1KB')
    parser.add_argument('-num', default='10')
    parser.add_argument('unittest_args', nargs='*')
    args, unknown_args = parser.parse_known_args()
    sys.argv[1:] = unknown_args  # leave only unittest's args for unittest.main()
    print "The file size for test is {}.".format(args.size)
    FILE_SIZE = convert_to_byte(args.size)
    print "The file number for test is {}.".format(args.num)
    MAX_NUMBER = int(args.num)
    unittest.main()
| apache-2.0 |
fahhem/plumbum | plumbum/commands/modifiers.py | 1 | 12344 | import os
from select import select
from subprocess import PIPE
import sys
from itertools import chain
from plumbum.commands.processes import run_proc, ProcessExecutionError
from plumbum.commands.base import AppendingStdoutRedirection, StdoutRedirection
from plumbum.lib import read_fd_decode_safely
class Future(object):
    """A handle on a process running in the background.

    Wraps a ``Popen`` object together with the exit code it is expected
    to produce, and exposes ``poll()``/``wait()`` plus lazily-populated
    ``returncode``, ``stdout`` and ``stderr`` properties.
    """
    def __init__(self, proc, expected_retcode, timeout = None):
        self.proc = proc
        self._expected_retcode = expected_retcode
        self._timeout = timeout
        self._returncode = None
        self._stdout = None
        self._stderr = None
    def __repr__(self):
        state = self._returncode if self.ready() else "running"
        return "<Future %r (%s)>" % (self.proc.argv, state)
    def poll(self):
        """Polls the underlying process for termination; returns ``False`` if still running,
        or ``True`` if terminated"""
        if self.proc.poll() is not None:
            # process finished -- collect its results eagerly
            self.wait()
        return self._returncode is not None
    ready = poll
    def wait(self):
        """Waits for the process to terminate; will raise a
        :class:`plumbum.commands.ProcessExecutionError` in case of failure"""
        if self._returncode is not None:
            return  # already collected
        result = run_proc(self.proc, self._expected_retcode, self._timeout)
        self._returncode, self._stdout, self._stderr = result
    @property
    def stdout(self):
        """The process' stdout; accessing this property will wait for the process to finish"""
        self.wait()
        return self._stdout
    @property
    def stderr(self):
        """The process' stderr; accessing this property will wait for the process to finish"""
        self.wait()
        return self._stderr
    @property
    def returncode(self):
        """The process' returncode; accessing this property will wait for the process to finish"""
        self.wait()
        return self._returncode
#===================================================================================================
# execution modifiers
#===================================================================================================
class ExecutionModifier(object):
    """Base class for the ``& MODIFIER`` execution helpers.

    Subclasses declare their configurable attributes via ``__slots__``;
    ``__repr__`` is derived automatically from the non-hidden slots.
    """
    __slots__ = ("__weakref__",)
    def __repr__(self):
        """Automatically creates a representation for given subclass with slots.
        Ignore hidden properties."""
        public = {}
        for klass in type(self).__mro__:
            declared = getattr(klass, "__slots__", ())
            if isinstance(declared, str):
                declared = (declared,)
            for attr in declared:
                if attr[0] != '_':
                    public[attr] = getattr(self, attr)
        body = ", ".join("{0} = {1}".format(k, v) for k, v in public.items())
        return "{0}({1})".format(type(self).__name__, body)
    @classmethod
    def __call__(cls, *args, **kwargs):
        # calling an *instance* yields a freshly-configured instance,
        # e.g. ``BG(retcode=7)`` where BG is already an instance
        return cls(*args, **kwargs)
class BG(ExecutionModifier):
    """
    Runs the bound command in the background, returning a
    :class:`Future <plumbum.commands.Future>`. Mimicking shell syntax, it
    takes effect when right-and-ed with a command; pass ``BG(retcode)``
    to expect a non-zero exit code. Example::

        future = sleep[5] & BG       # a future expecting an exit code of 0
        future = sleep[5] & BG(7)    # a future expecting an exit code of 7

    .. note::
        Background processes may fill up their stdout/stderr pipes and
        hang; if a process produces output, consume it periodically with
        a monitoring thread/reactor. For more info, see
        `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
    """
    __slots__ = ("retcode", "kargs")
    def __init__(self, retcode=0, **kargs):
        self.retcode = retcode
        self.kargs = kargs
    def __rand__(self, cmd):
        # launch with any stored popen kwargs and wrap in a Future
        proc = cmd.popen(**self.kargs)
        return Future(proc, self.retcode)
BG = BG()
"""
An execution modifier that runs the given command in the background, returning a
:class:`Future <plumbum.commands.Future>` object. In order to mimic shell syntax, it applies
when you right-and it with a command. If you wish to expect a different return code
(other than the normal success indicate by 0), use ``BG(retcode)``. Example::
future = sleep[5] & BG # a future expecting an exit code of 0
future = sleep[5] & BG(7) # a future expecting an exit code of 7
.. note::
When processes run in the **background** (either via ``popen`` or
:class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
causing them to hang. If you know a process produces output, be sure to consume it
every once in a while, using a monitoring thread/reactor in the background.
For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
"""
class FG(ExecutionModifier):
    """
    Runs the bound command in the foreground, attached to the current
    process' stdin, stdout and stderr -- useful for interactive programs
    that require a TTY. There is no return value.

    Takes effect when right-and-ed with a command; pass ``FG(retcode)``
    to expect a non-zero exit code. Example::

        vim & FG       # run vim in the foreground, expecting an exit code of 0
        vim & FG(7)    # run vim in the foreground, expecting an exit code of 7
    """
    __slots__ = ("retcode",)
    def __init__(self, retcode=0):
        self.retcode = retcode
    def __rand__(self, cmd):
        # Passing None for all three streams makes the child inherit
        # this process' stdin/stdout/stderr.
        cmd(retcode=self.retcode, stdin=None, stdout=None, stderr=None)
# Module-level singleton so plain ``cmd & FG`` works without instantiation.
FG = FG()
class TEE(ExecutionModifier):
    """Run a command, dumping its stdout/stderr to the current process's stdout
    and stderr, but ALSO return them. Useful for interactive programs that
    expect a TTY but also have valuable output.

    Use as:

        ls["-l"] & TEE

    Returns a tuple of (return code, stdout, stderr), just like ``run()``.
    """
    __slots__ = ("retcode", "buffered")
    def __init__(self, retcode=0, buffered=True):
        """`retcode` is the return code to expect to mean "success".  Set
        `buffered` to False to disable line-buffering the output, which may
        cause stdout and stderr to become more entangled than usual.
        """
        self.retcode = retcode
        self.buffered = buffered
    def __rand__(self, cmd):
        with cmd.bgrun(retcode=self.retcode, stdin=None, stdout=PIPE, stderr=PIPE) as p:
            outbuf = []  # raw bytes chunks read from the child's stdout
            errbuf = []  # raw bytes chunks read from the child's stderr
            out = p.stdout
            err = p.stderr
            buffers = {out: outbuf, err: errbuf}
            tee_to = {out: sys.stdout, err: sys.stderr}
            while p.poll() is None:
                # Block until at least one of the two pipes has data.
                ready, _, _ = select((out, err), (), ())
                for fd in ready:
                    buf = buffers[fd]
                    data, text = read_fd_decode_safely(fd, 4096)
                    if not data:  # eof
                        continue
                    # Python conveniently line-buffers stdout and stderr for
                    # us, so all we need to do is write to them
                    # This will automatically add up to three bytes if it cannot be decoded
                    tee_to[fd].write(text)
                    # And then "unbuffered" is just flushing after each write
                    if not self.buffered:
                        tee_to[fd].flush()
                    buf.append(data)
            # join the captured chunks into full utf-8 strings for return
            stdout = ''.join([x.decode('utf-8') for x in outbuf])
            stderr = ''.join([x.decode('utf-8') for x in errbuf])
            return p.returncode, stdout, stderr
# Module-level singleton so plain ``cmd & TEE`` works without instantiation.
TEE = TEE()
class TF(ExecutionModifier):
    """
    Runs the bound command and reports success as a boolean: ``True``
    when the process exits with the expected return code, ``False``
    otherwise. This is useful for checking true/false bash commands.

    Use ``TF(retcode)`` to expect a code other than 0, and ``TF(FG=True)``
    to run the process in the foreground.

    Example::

        local['touch']['/root/test'] & TF          # Returns False, since this cannot be touched
        local['touch']['/root/test'] & TF(1)       # Returns True
        local['touch']['/root/test'] & TF(FG=True) # Returns False, will show error message
    """
    __slots__ = ("retcode", "FG")
    def __init__(self, retcode=0, FG=False):
        """`retcode` is the return code to expect to mean "success".  Set
        `FG` to True to run in the foreground.
        """
        self.retcode = retcode
        self.FG = FG
    @classmethod
    def __call__(cls, *args, **kwargs):
        return cls(*args, **kwargs)
    def __rand__(self, cmd):
        run_kwargs = {"retcode": self.retcode}
        if self.FG:
            # attach to the current terminal's streams
            run_kwargs.update(stdin=None, stdout=None, stderr=None)
        try:
            cmd(**run_kwargs)
        except ProcessExecutionError:
            return False
        return True
# Module-level singleton so plain ``cmd & TF`` works without instantiation.
TF = TF()
class RETCODE(ExecutionModifier):
    """
    Runs the bound command and yields its numeric exit code -- useful for
    bash commands whose return code matters more than their output.

    Use ``RETCODE(FG=True)`` to run the process in the foreground.

    Example::

        local['touch']['/root/test'] & RETCODE          # Returns 1, since this cannot be touched
        local['touch']['/root/test'] & RETCODE(FG=True) # Returns 1, will show error message
    """
    __slots__ = ("foreground",)
    def __init__(self, FG=False):
        """`FG` to True to run in the foreground.
        """
        self.foreground = FG
    @classmethod
    def __call__(cls, *args, **kwargs):
        return cls(*args, **kwargs)
    def __rand__(self, cmd):
        # retcode=None disables plumbum's exit-code check; element [0] of
        # the (retcode, stdout, stderr) tuple is the exit code itself.
        if self.foreground:
            return cmd.run(retcode=None, stdin=None, stdout=None, stderr=None)[0]
        return cmd.run(retcode=None)[0]
# Module-level singleton so plain ``cmd & RETCODE`` works without instantiation.
RETCODE = RETCODE()
class NOHUP(ExecutionModifier):
    """
    An execution modifier that runs the given command in the background, disconnected
    from the current process, returning a
    standard popen object. It will keep running even if you close the current process.
    In order to slightly mimic shell syntax, it applies
    when you right-and it with a command. If you wish to use a diffent working directory
    or different stdout, stderr, you can use named arguments. The default is ``NOHUP(
    cwd=local.cwd, stdout='nohup.out', stderr=None)``. If stderr is None, stderr will be
    sent to stdout. Use ``os.devnull`` for null output. Will respect redirected output.

    Example::

        sleep[5] & NOHUP                      # Outputs to nohup.out
        sleep[5] & NOHUP(stdout=os.devnull)   # No output

    The equivelent bash command would be

    .. code-block:: bash

        nohup sleep 5 &
    """
    __slots__ = ('cwd', 'stdout', 'stderr', 'append')
    def __init__(self, cwd='.', stdout='nohup.out', stderr=None, append=True):
        """ Set ``cwd``, ``stdout``, or ``stderr``.
        Runs as a forked process. You can set ``append=False``, too.
        """
        self.cwd = cwd
        self.stdout = stdout
        self.stderr = stderr
        self.append = append
    def __rand__(self, cmd):
        # If the command carries an explicit stdout redirection
        # (``cmd > file`` or ``cmd >> file``), honour it over our own
        # stdout/append settings and unwrap the underlying command.
        if isinstance(cmd, StdoutRedirection):
            stdout = cmd.file
            append = False
            cmd = cmd.cmd
        elif isinstance(cmd, AppendingStdoutRedirection):
            stdout = cmd.file
            append = True
            cmd = cmd.cmd
        else:
            stdout = self.stdout
            append = self.append
        # NOTE(review): ``cmd`` is passed to its own ``.nohup()`` as the
        # first argument -- verify this matches the nohup() signature.
        return cmd.nohup(cmd, self.cwd, stdout, self.stderr, append)
# Module-level singleton so plain ``cmd & NOHUP`` works without instantiation.
NOHUP = NOHUP()
| mit |
chauhanhardik/populo_2 | lms/djangoapps/courseware/features/conditional.py | 102 | 4723 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, steps
from nose.tools import assert_in, assert_true # pylint: disable=no-name-in-module
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import add_problem_to_course, answer_problem
@steps
class ConditionalSteps(object):
    # NOTE: the raw strings at the top of each step method are lettuce
    # step-matching regexes (consumed by the @steps decorator), not
    # ordinary docstrings -- do not replace them with documentation.
    COURSE_NUM = 'test_course'

    def setup_conditional(self, step, condition_type, condition, cond_value):
        r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$'
        # Register for the course and build the vertical/wrapper scaffolding
        # that holds both the condition source and the conditional block.
        i_am_registered_for_the_course(step, self.COURSE_NUM)
        world.scenario_dict['VERTICAL'] = world.ItemFactory(
            parent_location=world.scenario_dict['SECTION'].location,
            category='vertical',
            display_name="Test Vertical",
        )
        world.scenario_dict['WRAPPER'] = world.ItemFactory(
            parent_location=world.scenario_dict['VERTICAL'].location,
            category='wrapper',
            display_name="Test Poll Wrapper"
        )
        # The condition source is either a problem or a poll question.
        if condition_type == 'problem':
            world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string')
        elif condition_type == 'poll':
            world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory(
                parent_location=world.scenario_dict['WRAPPER'].location,
                category='poll_question',
                display_name='Conditional Poll',
                data={
                    'question': 'Is this a good poll?',
                    'answers': [
                        {'id': 'yes', 'text': 'Yes, of course'},
                        {'id': 'no', 'text': 'Of course not!'}
                    ],
                }
            )
        else:
            raise Exception("Unknown condition type: {!r}".format(condition_type))

        # The condition (e.g. attempted=True) is stored as an XML attribute
        # on the conditional module.
        metadata = {
            'xml_attributes': {
                condition: cond_value
            }
        }

        world.scenario_dict['CONDITIONAL'] = world.ItemFactory(
            parent_location=world.scenario_dict['WRAPPER'].location,
            category='conditional',
            display_name="Test Conditional",
            metadata=metadata,
            sources_list=[world.scenario_dict['CONDITION_SOURCE'].location],
        )

        world.ItemFactory(
            parent_location=world.scenario_dict['CONDITIONAL'].location,
            category='html',
            display_name='Conditional Contents',
            # Fixed malformed markup: the opening <div> was previously
            # closed with a stray </p>.
            data='<html><div class="hidden-contents">Hidden Contents</div></html>'
        )

    def setup_problem_attempts(self, step, not_attempted=None):
        r'that the conditioned problem has (?P<not_attempted>not )?been attempted$'
        # Unless the step says "not been attempted", answer the problem
        # correctly and submit it so the condition becomes satisfied.
        visit_scenario_item('CONDITION_SOURCE')
        if not_attempted is None:
            answer_problem(self.COURSE_NUM, 'string', True)
            world.css_click("button.check")

    def when_i_view_the_conditional(self, step):
        r'I view the conditional$'
        visit_scenario_item('CONDITIONAL')
        # Wait until the client-side Conditional block has initialized.
        world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")')

    def check_visibility(self, step, visible):
        r'the conditional contents are (?P<visible>\w+)$'
        # Either the hidden contents are shown, or the "must be attempted"
        # placeholder message is displayed instead.
        world.wait_for_ajax_complete()
        assert_in(visible, ('visible', 'hidden'))
        if visible == 'visible':
            world.wait_for_visible('.hidden-contents')
            assert_true(world.css_visible('.hidden-contents'))
        else:
            assert_true(world.is_css_not_present('.hidden-contents'))
            assert_true(
                world.css_contains_text(
                    '.conditional-message',
                    'must be attempted before this will become visible.'
                )
            )

    def answer_poll(self, step, answer):
        r' I answer the conditioned poll "([^"]*)"$'
        # Map the requested answer id to its display text, then click the
        # matching poll option on the page.
        visit_scenario_item('CONDITION_SOURCE')
        world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")')
        world.wait_for_ajax_complete()

        answer_text = [
            poll_answer['text']
            for poll_answer
            in world.scenario_dict['CONDITION_SOURCE'].answers
            if poll_answer['id'] == answer
        ][0]

        text_selector = '.poll_answer .text'

        poll_texts = world.retry_on_exception(
            lambda: [elem.text for elem in world.css_find(text_selector)]
        )

        for idx, poll_text in enumerate(poll_texts):
            if poll_text == answer_text:
                world.css_click(text_selector, index=idx)
                return

# Instantiating the class registers all of its steps with lettuce.
ConditionalSteps()
| agpl-3.0 |
feend78/evennia | evennia/server/inputfuncs.py | 1 | 15810 | """
Functions for processing input commands.
All global functions in this module whose name does not start with "_"
is considered an inputfunc. Each function must have the following
callsign:
inputfunc(session, *args, **kwargs)
Where "options" is always one of the kwargs, containing eventual
protocol-options.
There is one special function, the "default" function, which is called
on a no-match. It has this callsign:
default(session, cmdname, *args, **kwargs)
Evennia knows which modules to use for inputfuncs by
settings.INPUT_FUNC_MODULES.
"""
from future.utils import viewkeys
import importlib
from django.conf import settings
from evennia.commands.cmdhandler import cmdhandler
from evennia.accounts.models import AccountDB
from evennia.utils.logger import log_err
from evennia.utils.utils import to_str, to_unicode
BrowserSessionStore = importlib.import_module(settings.SESSION_ENGINE).SessionStore
# always let "idle" work since we use this in the webclient
_IDLE_COMMAND = settings.IDLE_COMMAND
_IDLE_COMMAND = (_IDLE_COMMAND, ) if _IDLE_COMMAND == "idle" else (_IDLE_COMMAND, "idle")
_GA = object.__getattribute__
_SA = object.__setattr__
def _NA(o):
return "N/A"
_ERROR_INPUT = "Inputfunc {name}({session}): Wrong/unrecognized input: {inp}"
# All global functions are inputfuncs available to process inputs
def text(session, *args, **kwargs):
    """
    Main text input from the client. This will execute a command
    string on the server.

    Args:
        session (Session): The active Session to receive the input.
        text (str): First arg is used as text-command input. Other
            arguments are ignored.

    """
    #from evennia.server.profiling.timetrace import timetrace
    #text = timetrace(text, "ServerSession.data_in")

    txt = args[0] if args else None
    # explicitly check for None since text can be an empty string, which is
    # also valid
    if txt is None:
        return
    # this is treated as a command input
    # handle the 'idle' command
    if txt.strip() in _IDLE_COMMAND:
        # idle keep-alives only bump the counters; they are not commands
        session.update_session_counters(idle=True)
        return
    if session.account:
        # nick replacement; include_account differs depending on whether
        # the session is currently puppeting a character
        puppet = session.puppet
        if puppet:
            txt = puppet.nicks.nickreplace(txt,
                          categories=("inputline", "channel"), include_account=True)
        else:
            txt = session.account.nicks.nickreplace(txt,
                          categories=("inputline", "channel"), include_account=False)
    # 'options' carries protocol metadata, not command-handler input
    kwargs.pop("options", None)
    cmdhandler(session, txt, callertype="session", session=session, **kwargs)
    session.update_session_counters()
def bot_data_in(session, *args, **kwargs):
    """
    Text input from the IRC and RSS bots.
    This will trigger the execute_cmd method on the bots in-game counterpart.

    Args:
        session (Session): The active Session to receive the input.
        text (str): First arg is text input. Other arguments are ignored.

    """
    txt = args[0] if args else None
    # Explicitly check for None since text can be an empty string, which is
    # also valid
    if txt is None:
        return
    # this is treated as a command input
    # handle the 'idle' command
    if txt.strip() in _IDLE_COMMAND:
        session.update_session_counters(idle=True)
        return
    # 'options' carries protocol metadata, not command input
    kwargs.pop("options", None)
    # Trigger the execute_cmd method of the corresponding bot.
    session.account.execute_cmd(session=session, txt=txt, **kwargs)
    session.update_session_counters()
def echo(session, *args, **kwargs):
    """
    Echo test function: send all received positional args back to the
    client as a text string.
    """
    # Interpolate the args tuple itself. Previously the bare ``args`` was
    # used on the right of ``%``, which made Python treat it as multiple
    # format arguments and raise TypeError whenever more than one
    # argument was echoed.
    session.data_out(text="Echo returns: %s" % (args,))
def default(session, cmdname, *args, **kwargs):
    """
    Default catch-function. This is like all other input functions except
    it will get `cmdname` as the first argument.

    The error is always logged; it is echoed back to the client only if
    the session has the INPUTDEBUG protocol flag enabled.
    """
    err = "Session {sessid}: Input command not recognized:\n" \
          " name: '{cmdname}'\n" \
          " args, kwargs: {args}, {kwargs}".format(sessid=session.sessid,
                                                   cmdname=cmdname,
                                                   args=args,
                                                   kwargs=kwargs)
    if session.protocol_flags.get("INPUTDEBUG", False):
        session.msg(err)
    log_err(err)
def client_options(session, *args, **kwargs):
    """
    This allows the client an OOB way to inform us about its name and capabilities.
    This will be integrated into the session settings

    Kwargs:
        get (bool): If this is true, return the settings as a dict
            (ignore all other kwargs).
        client (str): A client identifier, like "mushclient".
        version (str): A client version
        ansi (bool): Supports ansi colors
        xterm256 (bool): Supports xterm256 colors or not
        mxp (bool): Supports MXP or not
        utf-8 (bool): Supports UTF-8 or not
        screenreader (bool): Screen-reader mode on/off
        mccp (bool): MCCP compression on/off
        screenheight (int): Screen height in lines
        screenwidth (int): Screen width in characters
        inputdebug (bool): Debug input functions
        nocolor (bool): Strip color
        raw (bool): Turn off parsing

    """
    flags = session.protocol_flags
    if not kwargs or kwargs.get("get", False):
        # read-only query: report the current settings as a dict
        options = dict((key, flags[key]) for key in flags
                       if key.upper() in ("ANSI", "XTERM256", "MXP",
                                          "UTF-8", "SCREENREADER", "ENCODING",
                                          "MCCP", "SCREENHEIGHT",
                                          "SCREENWIDTH", "INPUTDEBUG",
                                          "RAW", "NOCOLOR",
                                          "NOGOAHEAD"))
        session.msg(client_options=options)
        return

    def validate_encoding(val):
        # helper: change encoding, verifying it is a codec Python knows
        try:
            to_str(to_unicode("test-string"), encoding=val)
        except LookupError:
            raise RuntimeError("The encoding '|w%s|n' is invalid. " % val)
        return val

    def validate_size(val):
        # screen sizes are stored keyed by window-pane index (0 = main)
        return {0: int(val)}

    def validate_bool(val):
        # accept both real booleans and the strings "true"/"on"/"1"
        if isinstance(val, basestring):
            return True if val.lower() in ("true", "on", "1") else False
        return bool(val)

    for key, value in kwargs.iteritems():
        key = key.lower()
        if key == "client":
            flags["CLIENTNAME"] = to_str(value)
        elif key == "version":
            if "CLIENTNAME" in flags:
                flags["CLIENTNAME"] = "%s %s" % (flags["CLIENTNAME"], to_str(value))
        elif key == "encoding":
            # BUGFIX: this was compared against "ENCODING", which could
            # never match since `key` is lowercased above, so the client's
            # encoding choice was silently ignored.
            flags["ENCODING"] = validate_encoding(value)
        elif key == "ansi":
            flags["ANSI"] = validate_bool(value)
        elif key == "xterm256":
            flags["XTERM256"] = validate_bool(value)
        elif key == "mxp":
            flags["MXP"] = validate_bool(value)
        elif key == "utf-8":
            flags["UTF-8"] = validate_bool(value)
        elif key == "screenreader":
            flags["SCREENREADER"] = validate_bool(value)
        elif key == "mccp":
            flags["MCCP"] = validate_bool(value)
        elif key == "screenheight":
            flags["SCREENHEIGHT"] = validate_size(value)
        elif key == "screenwidth":
            flags["SCREENWIDTH"] = validate_size(value)
        elif key == "inputdebug":
            flags["INPUTDEBUG"] = validate_bool(value)
        elif key == "nocolor":
            flags["NOCOLOR"] = validate_bool(value)
        elif key == "raw":
            flags["RAW"] = validate_bool(value)
        elif key == "nogoahead":
            flags["NOGOAHEAD"] = validate_bool(value)
        elif key in ('char 1', 'char.skills 1', 'char.items 1',
                     'room 1', 'ire.rift 1', 'ire.composer 1'):
            # ignore mudlet's default send (aimed at IRE games)
            # BUGFIX: these were previously listed with their original
            # capitalization ('Char 1', ...), which could never match the
            # lowercased `key`, so they fell through to the error branch.
            pass
        elif key not in ("options", "cmdid"):
            err = _ERROR_INPUT.format(
                name="client_settings", session=session, inp=key)
            session.msg(text=err)

    session.protocol_flags = flags
    # we must update the portal as well
    session.sessionhandler.session_portal_sync(session)
# GMCP alias
# ``hello`` and ``supports_set`` are protocol-level aliases for
# client_options (used by GMCP "hello"/"supports_set" messages).
hello = client_options
supports_set = client_options
def get_client_options(session, *args, **kwargs):
    """
    Alias wrapper for getting options.
    """
    # Delegate to client_options in read-only mode.
    return client_options(session, get=True)
def get_inputfuncs(session, *args, **kwargs):
    """
    Get the keys of all available inputfuncs. Note that we don't get
    it from this module alone since multiple modules could be added.
    So we get it from the sessionhandler.

    Replies to the client with a mapping of inputfunc name -> docstring.
    """
    inputfuncsdict = dict((key, func.__doc__) for key, func
                          in session.sessionhandler.get_inputfuncs().iteritems())
    session.msg(get_inputfuncs=inputfuncsdict)
def login(session, *args, **kwargs):
    """
    Peform a login. This only works if session is currently not logged
    in. This will also automatically throttle too quick attempts.

    Kwargs:
        name (str): Account name
        password (str): Plain-text password

    """
    # only act for not-yet-authenticated sessions with both credentials
    if not session.logged_in and "name" in kwargs and "password" in kwargs:
        from evennia.commands.default.unloggedin import create_normal_account
        account = create_normal_account(session, kwargs["name"], kwargs["password"])
        if account:
            session.sessionhandler.login(session, account)
# Whitelist of value-names clients may query via get_value() below,
# mapping each name to an accessor callable.
_gettable = {
    "name": lambda obj: obj.key,
    "key": lambda obj: obj.key,
    "location": lambda obj: obj.location.key if obj.location else "None",
    "servername": lambda obj: settings.SERVERNAME
}
def get_value(session, *args, **kwargs):
    """
    Return the value of a given attribute or db_property on the
    session's current account or character.

    Kwargs:
        name (str): Name of info value to return. Only names
            whitelisted in the ``_gettable`` dictionary earlier in this
            module are accepted.

    """
    name = kwargs.get("name", "")
    # prefer the puppeted character; fall back to the account
    target = session.puppet or session.account
    getter = _gettable.get(name)
    if getter is not None:
        session.msg(get_value={"name": name, "value": getter(target)})
def _testrepeat(**kwargs):
    """
    This is a test function for using with the repeat
    inputfunc.

    Kwargs:
        session (Session): Session to return to.

    """
    import time
    # send a timestamped message so repeated calls are distinguishable
    kwargs["session"].msg(repeat="Repeat called: %s" % time.time())
_repeatable = {"test1": _testrepeat, # example only
"test2": _testrepeat} # "
def repeat(session, *args, **kwargs):
    """
    Call a named function repeatedly. Note that
    this is meant as an example of limiting the number of
    possible call functions.

    Kwargs:
        callback (str): The function to call. Only functions
            from the _repeatable dictionary earlier in this
            module are available.
        interval (int): How often to call function (s).
            Defaults to once every 60 seconds with a minimum
                of 5 seconds.
        stop (bool): Stop a previously assigned ticker with
            the above settings.

    """
    from evennia.scripts.tickerhandler import TICKER_HANDLER
    name = kwargs.get("callback", "")
    # clamp to a 5-second minimum so clients cannot flood the server
    interval = max(5, int(kwargs.get("interval", 60)))

    if name in _repeatable:
        if kwargs.get("stop", False):
            # the sessid idstring scopes the ticker to this session
            TICKER_HANDLER.remove(interval, _repeatable[name], idstring=session.sessid, persistent=False)
        else:
            TICKER_HANDLER.add(interval, _repeatable[name], idstring=session.sessid, persistent=False, session=session)
    else:
        session.msg("Allowed repeating functions are: %s" % (", ".join(_repeatable)))
def unrepeat(session, *args, **kwargs):
    """Wrapper for OOB use: stop a ticker started with ``repeat``."""
    repeat(session, *args, **dict(kwargs, stop=True))
# Whitelist of monitorable property names, mapping the client-facing
# name to the actual field name on the puppet object.
_monitorable = {
    "name": "db_key",
    "location": "db_location",
    "desc": "desc"
}
def _on_monitor_change(**kwargs):
    """Callback fired by the MonitorHandler when a watched field changes;
    relays the new value to the monitoring session."""
    fieldname = kwargs["fieldname"]
    obj = kwargs["obj"]
    name = kwargs["name"]
    session = kwargs["session"]
    # the session may be None if the char quits and someone
    # else then edits the object
    if session:
        session.msg(monitor={"name": name, "value": _GA(obj, fieldname)})
def monitor(session, *args, **kwargs):
    """
    Adds monitoring to a given property or Attribute.

    Kwargs:
      name (str): The name of the property or Attribute
        to report. No db_* prefix is needed. Only names
        in the _monitorable dict earlier in this module
        are accepted.
      stop (bool): Stop monitoring the above name.

    Monitoring is tied to the session's current puppet; nothing happens
    if the session is not puppeting anything.
    """
    from evennia.scripts.monitorhandler import MONITOR_HANDLER
    name = kwargs.get("name", None)
    if name and name in _monitorable and session.puppet:
        field_name = _monitorable[name]
        obj = session.puppet
        if kwargs.get("stop", False):
            # the sessid idstring scopes the monitor to this session
            MONITOR_HANDLER.remove(obj, field_name, idstring=session.sessid)
        else:
            # the handler will add fieldname and obj to the kwargs automatically
            MONITOR_HANDLER.add(obj, field_name, _on_monitor_change, idstring=session.sessid,
                                persistent=False, name=name, session=session)
def unmonitor(session, *args, **kwargs):
    """
    Wrapper for turning off monitoring
    """
    monitor(session, *args, **dict(kwargs, stop=True))
def _on_webclient_options_change(**kwargs):
    """
    Called when the webclient options stored on the account changes.
    Inform the interested clients of this change.
    """
    session = kwargs["session"]
    obj = kwargs["obj"]
    fieldname = kwargs["fieldname"]
    clientoptions = _GA(obj, fieldname)
    # the session may be None if the char quits and someone
    # else then edits the object
    if session:
        session.msg(webclient_options=clientoptions)
def webclient_options(session, *args, **kwargs):
    """
    Handles retrieving and changing of options related to the webclient.

    If kwargs is empty (or contains just a "cmdid"), the saved options will be
    sent back to the session.
    A monitor handler will be created to inform the client of any future options
    that changes.

    If kwargs is not empty, the key/values stored in there will be persisted
    to the account object.

    Kwargs:
        <option name>: an option to save
    """
    account = session.account

    clientoptions = account.db._saved_webclient_options
    if not clientoptions:
        # No saved options for this account, copy and save the default.
        account.db._saved_webclient_options = settings.WEBCLIENT_OPTIONS.copy()
        # Get the _SaverDict created by the database.
        clientoptions = account.db._saved_webclient_options

    # The webclient adds a cmdid to every kwargs, but we don't need it.
    try:
        del kwargs["cmdid"]
    except KeyError:
        pass

    if not kwargs:
        # No kwargs: we are getting the stored options
        # Convert clientoptions to regular dict for sending.
        session.msg(webclient_options=dict(clientoptions))

        # Create a monitor. If a monitor already exists then it will replace
        # the previous one since it would use the same idstring
        from evennia.scripts.monitorhandler import MONITOR_HANDLER
        MONITOR_HANDLER.add(account, "_saved_webclient_options",
                            _on_webclient_options_change,
                            idstring=session.sessid, persistent=False,
                            session=session)
    else:
        # kwargs provided: persist them to the account object
        for key, value in kwargs.iteritems():
            clientoptions[key] = value
| bsd-3-clause |
williballenthin/ida-netnode | netnode/test_netnode.py | 1 | 4807 | import random
import string
import logging
import contextlib
import idaapi
from netnode import netnode
# get the IDA version number
ida_major, ida_minor = list(map(int, idaapi.get_kernel_version().split(".")))
# IDA 7 renamed several netnode hash-iteration APIs (hash1st -> hashfirst,
# etc.); the tests below branch on this flag.
using_ida7api = (ida_major > 6)
# scratch namespace used by all tests; killed between runs
TEST_NAMESPACE = '$ some.namespace'
def get_random_data(N):
    """Return a string of N random characters drawn from the uppercase
    ASCII letters and the digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(N))
@contextlib.contextmanager
def killing_netnode(namespace):
    '''
    wraps a netnode in a contextmanager that will
    eventually destroy its contents.

    probably only useful for testing when a clean state is req'd.
    '''
    n = netnode.Netnode(namespace)
    try:
        yield n
    finally:
        # wipe all stored data even if the test body raised
        n.kill()
def test_basic_features():
    '''
    demonstrate the basic netnode API (like a dict): membership,
    indexing, keys/values/items and deletion, with both int and str keys.
    '''
    with killing_netnode(TEST_NAMESPACE) as n:
        # there is nothing in the netnode to begin with
        assert(False == (1 in n))

        # when we add one key, there is one thing in it
        n[1] = 'hello'
        assert(True == (1 in n))
        assert(n[1] == 'hello')

        # but nothing else
        assert(False == ('2' in n))

        # then when we add a second thing, its also there
        n['2'] = 'world'
        assert(True == ('2' in n))

        assert(len(list(n.keys())) == 2)
        assert(list(n.keys())[0] == 1)
        assert(list(n.keys())[1] == '2')

        assert(len(list(n.values())) == 2)
        assert(list(n.values())[0] == 'hello')
        assert(list(n.values())[1] == 'world')

        assert(len(list(n.items())) == 2)

        # and when we delete the first item, only it is deleted
        del n[1]
        assert(False == (1 in n))

        # and finally everything is gone
        del n['2']
def test_large_data():
    '''
    demonstrate that netnodes support large data values (8KB here, well
    above IDA's per-slot size limit, so the value must be chunked).
    '''
    with killing_netnode(TEST_NAMESPACE) as n:
        random_data = get_random_data(1024 * 8)
        n[3] = random_data
        assert(n[3] == random_data)
        del n[3]
        # the netnode must be completely empty again
        assert(dict(n) == {})
def test_hash_ordering():
    '''
    the following demonstrates that 'hashes' are iterated alphabetically.
    this is an IDAPython implementation feature.
    '''
    with killing_netnode(TEST_NAMESPACE) as n:
        # work on the raw idaapi.netnode object that backs our wrapper
        m = n._n

        def hashiter(m):
            # walk the hash entries using whichever iteration API this
            # IDA version provides (renamed in IDA 7)
            i = None
            if using_ida7api:
                i = m.hashfirst()
            else:
                i = m.hash1st()
            while i != idaapi.BADNODE and i is not None:
                yield i
                if using_ida7api:
                    i = m.hashnext(i)
                else:
                    i = m.hashnxt(i)

        def get_hash_order(hiter):
            return [k for k in hiter]

        # insertion order is a, c, b -- iteration is always alphabetical
        m.hashset('a', b'a')
        assert get_hash_order(hashiter(m)) == ['a']
        m.hashset('c', b'c')
        assert get_hash_order(hashiter(m)) == ['a', 'c']
        m.hashset('b', b'b')
        assert get_hash_order(hashiter(m)) == ['a', 'b', 'c']
def test_iterkeys():
    """Verify key enumeration stays correct when values are large enough
    to be stored chunked, mixing int and str keys and small values."""
    LARGE_VALUE = get_random_data(16 * 1024)
    LARGE_VALUE2 = get_random_data(16 * 1024)

    # sanity check: the values really are large and incompressible enough
    # to force the chunked storage path
    import zlib
    assert(len(zlib.compress(LARGE_VALUE.encode("ascii"))) > 1024)
    assert(len(zlib.compress(LARGE_VALUE2.encode("ascii"))) > 1024)
    assert LARGE_VALUE != LARGE_VALUE2

    with killing_netnode(TEST_NAMESPACE) as n:
        n[1] = LARGE_VALUE
        assert set(n.keys()) == set([1])
        n[2] = LARGE_VALUE2
        assert set(n.keys()) == set([1, 2])
        assert n[1] != n[2]

    with killing_netnode(TEST_NAMESPACE) as n:
        n['one'] = LARGE_VALUE
        assert set(n.keys()) == set(['one'])
        n['two'] = LARGE_VALUE2
        assert set(n.keys()) == set(['one', 'two'])
        assert n['one'] != n['two']

    with killing_netnode(TEST_NAMESPACE) as n:
        # mixed int/str keys and mixed large/small values together
        n[1] = LARGE_VALUE
        assert set(n.keys()) == set([1])
        n[2] = LARGE_VALUE
        assert set(n.keys()) == set([1, 2])
        n['one'] = LARGE_VALUE
        assert set(n.keys()) == set([1, 2, 'one'])
        n['two'] = LARGE_VALUE
        assert set(n.keys()) == set([1, 2, 'one', 'two'])
        n[3] = "A"
        assert set(n.keys()) == set([1, 2, 'one', 'two', 3])
        n['three'] = "A"
        assert set(n.keys()) == set([1, 2, 'one', 'two', 3, 'three'])
def main():
    logging.basicConfig(level=logging.DEBUG)
    # start from a clean slate: drop any leftover data from a prior run
    netnode.Netnode(TEST_NAMESPACE).kill()
    # rely on assert crashing the interpreter to indicate failure.
    # pytest no longer works on py3 idapython.
    for case in (test_basic_features,
                 test_large_data,
                 test_hash_ordering,
                 test_iterkeys):
        case()
    print("netnode: tests: pass")
# allow running the test suite directly (e.g. from the IDAPython console)
if __name__ == '__main__':
    main()
| apache-2.0 |
pshen/ansible | lib/ansible/modules/network/iosxr/iosxr_config.py | 32 | 11013 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: iosxr_config
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage Cisco IOS XR configuration sections
description:
- Cisco IOS XR configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with IOS XR configuration sections in
a deterministic way.
extends_documentation_fragment: iosxr
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "yes", "no" ]
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
required: false
default: 'configured by iosxr_config'
version_added: "2.2"
"""
EXAMPLES = """
- name: configure top level configuration
iosxr_config:
lines: hostname {{ inventory_hostname }}
- name: configure interface settings
iosxr_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface GigabitEthernet0/0/0/0
- name: load a config from disk and replace the current config
iosxr_config:
src: config.cfg
backup: yes
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/iosxr01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.iosxr import load_config,get_config
from ansible.module_utils.iosxr import iosxr_argument_spec
DEFAULT_COMMIT_COMMENT = 'configured by iosxr_config'
def check_args(module, warnings):
    """Validate module parameters and collect deprecation warnings.

    Fails the module when the commit comment exceeds the 60-character
    limit, and records a deprecation warning when the legacy ``force``
    argument is supplied.
    """
    comment = module.params['comment']
    if comment and len(comment) > 60:
        module.fail_json(msg='comment argument cannot be more than 60 characters')
    if module.params['force']:
        warnings.append('The force argument is deprecated, please use '
                        'match=none instead. This argument will be '
                        'removed in the future')
def get_running_config(module):
    """Return the device running-config as a parsed NetworkConfig.

    Prefers an explicitly supplied ``config`` parameter over fetching
    the live configuration from the device.
    """
    contents = module.params['config'] or get_config(module)
    return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
    """Build the candidate configuration from ``src`` or ``lines``."""
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        # lines without parents are treated as top-level config statements
        candidate.add(module.params['lines'],
                      parents=module.params['parents'] or list())
    return candidate
def run(module, result):
    """Diff the candidate config against the device and apply changes.

    Mutates ``result`` in place, adding the computed command list, the
    prepared diff (when the device reports one) and the ``changed`` flag.
    """
    match = module.params['match']
    replace = module.params['replace']
    replace_config = replace == 'config'
    path = module.params['parents']
    comment = module.params['comment']
    check_mode = module.check_mode

    candidate = get_candidate(module)

    if match != 'none' and replace != 'config':
        # get_running_config() already returns a parsed NetworkConfig, so
        # use it directly. Re-wrapping it in a second NetworkConfig (as
        # before) relied on the object's string conversion and re-parsed
        # the whole config a second time for no benefit.
        configobj = get_running_config(module)
        commands = candidate.difference(configobj, path=path, match=match,
                                        replace=replace)
    else:
        commands = candidate.items

    if commands:
        commands = dumps(commands, 'commands').split('\n')

        if any((module.params['lines'], module.params['src'])):
            if module.params['before']:
                commands[:0] = module.params['before']

            if module.params['after']:
                commands.extend(module.params['after'])

            result['commands'] = commands

        # load_config pushes the commands (honoring check mode) and returns
        # the device-reported diff when something actually changed
        diff = load_config(module, commands, result['warnings'],
                           not check_mode, replace_config, comment)
        if diff:
            result['diff'] = dict(prepared=diff)
            result['changed'] = True
def main():
    """Entry point for module execution."""
    spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block', 'config']),
        # this argument is deprecated in favor of setting match: none
        # it will be removed in a future version
        force=dict(default=False, type='bool'),
        config=dict(),
        backup=dict(type='bool', default=False),
        comment=dict(default=DEFAULT_COMMIT_COMMENT),
    )
    spec.update(iosxr_argument_spec)

    module = AnsibleModule(
        argument_spec=spec,
        mutually_exclusive=[('lines', 'src')],
        required_if=[('match', 'strict', ['lines']),
                     ('match', 'exact', ['lines']),
                     ('replace', 'block', ['lines']),
                     ('replace', 'config', ['src'])],
        supports_check_mode=True)

    # force=yes is legacy shorthand for match=none
    if module.params['force'] is True:
        module.params['match'] = 'none'

    warnings = list()
    check_args(module, warnings)

    result = dict(changed=False, warnings=warnings)

    # snapshot the running config before touching anything, if requested
    if module.params['backup']:
        result['__backup__'] = get_config(module)

    run(module, result)

    module.exit_json(**result)
# standard Ansible module entry point
if __name__ == '__main__':
    main()
| gpl-3.0 |
aneeshusa/android-quill | jni/libhpdf-2.3.0RC2/if/python/hpdf_consts.py | 32 | 19968 | ###
## * << Haru Free PDF Library 2.0.8 >> -- hpdf_consts.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
from hpdf_types import *
#----------------------------------------------------------------------------
HPDF_TRUE =1
HPDF_FALSE =0
HPDF_OK =0
HPDF_NOERROR =0
#----- default values -------------------------------------------------------
# buffer size which is required when we convert to character string.
HPDF_TMP_BUF_SIZ =512
HPDF_SHORT_BUF_SIZ =32
HPDF_REAL_LEN =11
HPDF_INT_LEN =11
HPDF_TEXT_DEFAULT_LEN =256
HPDF_UNICODE_HEADER_LEN =2
HPDF_DATE_TIME_STR_LEN =23
# length of each item defined in PDF
HPDF_BYTE_OFFSET_LEN =10
HPDF_OBJ_ID_LEN =7
HPDF_GEN_NO_LEN =5
# default value of Graphic State
HPDF_DEF_FONT ="Helvetica"
HPDF_DEF_PAGE_LAYOUT =HPDF_PAGE_LAYOUT_SINGLE
HPDF_DEF_PAGE_MODE =HPDF_PAGE_MODE_USE_NONE
HPDF_DEF_WORDSPACE =0
HPDF_DEF_CHARSPACE =0
HPDF_DEF_FONTSIZE =10
HPDF_DEF_HSCALING =100
HPDF_DEF_LEADING =0
HPDF_DEF_RENDERING_MODE =HPDF_FILL
HPDF_DEF_RISE =0
HPDF_DEF_RAISE =HPDF_DEF_RISE
HPDF_DEF_LINEWIDTH =1
HPDF_DEF_LINECAP =HPDF_BUTT_END
HPDF_DEF_LINEJOIN =HPDF_MITER_JOIN
HPDF_DEF_MITERLIMIT =10
HPDF_DEF_FLATNESS =1
HPDF_DEF_PAGE_NUM =1
HPDF_BS_DEF_WIDTH =1
# default page-size
HPDF_DEF_PAGE_WIDTH =595.276
HPDF_DEF_PAGE_HEIGHT =841.89
HPDF_VERSION_TEXT ="2.0.8"
#---------------------------------------------------------------------------
#----- compression mode ----------------------------------------------------
HPDF_COMP_NONE =0x00
HPDF_COMP_TEXT =0x01
HPDF_COMP_IMAGE =0x02
HPDF_COMP_METADATA =0x04
HPDF_COMP_ALL =0x0F
HPDF_COMP_BEST_COMPRESS =0x10
HPDF_COMP_BEST_SPEED =0x20
HPDF_COMP_MASK =0xFF
#----------------------------------------------------------------------------
#----- permission flags (only Revision 2 is supported)-----------------------
HPDF_ENABLE_READ =0
HPDF_ENABLE_PRINT =4
HPDF_ENABLE_EDIT_ALL =8
HPDF_ENABLE_COPY =16
HPDF_ENABLE_EDIT =32
#----------------------------------------------------------------------------
#------ viewer preferences definitions --------------------------------------
HPDF_HIDE_TOOLBAR =1
HPDF_HIDE_MENUBAR =2
HPDF_HIDE_WINDOW_UI =4
HPDF_FIT_WINDOW =8
HPDF_CENTER_WINDOW =16
#---------------------------------------------------------------------------
#------ limitation of object implementation (PDF1.4) -----------------------
HPDF_LIMIT_MAX_INT =2147483647
HPDF_LIMIT_MIN_INT =-2147483647
HPDF_LIMIT_MAX_REAL =32767
HPDF_LIMIT_MIN_REAL =-32767
HPDF_LIMIT_MAX_STRING_LEN =65535
HPDF_LIMIT_MAX_NAME_LEN =127
HPDF_LIMIT_MAX_ARRAY =8191
HPDF_LIMIT_MAX_DICT_ELEMENT =4095
HPDF_LIMIT_MAX_XREF_ELEMENT =8388607
HPDF_LIMIT_MAX_GSTATE =28
HPDF_LIMIT_MAX_DEVICE_N =8
HPDF_LIMIT_MAX_DEVICE_N_V15 =32
HPDF_LIMIT_MAX_CID =65535
HPDF_MAX_GENERATION_NUM =65535
HPDF_MIN_PAGE_HEIGHT =3
HPDF_MIN_PAGE_WIDTH =3
HPDF_MAX_PAGE_HEIGHT =14400
HPDF_MAX_PAGE_WIDTH =14400
HPDF_MIN_MAGNIFICATION_FACTOR =8
HPDF_MAX_MAGNIFICATION_FACTOR =3200
#---------------------------------------------------------------------------
#------ limitation of various properties -----------------------------------
HPDF_MIN_PAGE_SIZE =3
HPDF_MAX_PAGE_SIZE =14400
HPDF_MIN_HORIZONTALSCALING =10
HPDF_MAX_HORIZONTALSCALING =300
HPDF_MIN_WORDSPACE =-30
HPDF_MAX_WORDSPACE =300
HPDF_MIN_CHARSPACE =-30
HPDF_MAX_CHARSPACE =300
HPDF_MAX_FONTSIZE =300
HPDF_MAX_ZOOMSIZE =10
HPDF_MAX_LEADING =300
HPDF_MAX_LINEWIDTH =100
HPDF_MAX_DASH_PATTERN =100
HPDF_MAX_JWW_NUM =128
#----------------------------------------------------------------------------
#----- country code definition ----------------------------------------------
HPDF_COUNTRY_AF ="AF" # AFGHANISTAN
HPDF_COUNTRY_AL ="AL" # ALBANIA
HPDF_COUNTRY_DZ ="DZ" # ALGERIA
HPDF_COUNTRY_AS ="AS" # AMERICAN SAMOA
HPDF_COUNTRY_AD ="AD" # ANDORRA
HPDF_COUNTRY_AO ="AO" # ANGOLA
HPDF_COUNTRY_AI ="AI" # ANGUILLA
HPDF_COUNTRY_AQ ="AQ" # ANTARCTICA
HPDF_COUNTRY_AG ="AG" # ANTIGUA AND BARBUDA
HPDF_COUNTRY_AR ="AR" # ARGENTINA
HPDF_COUNTRY_AM ="AM" # ARMENIA
HPDF_COUNTRY_AW ="AW" # ARUBA
HPDF_COUNTRY_AU ="AU" # AUSTRALIA
HPDF_COUNTRY_AT ="AT" # AUSTRIA
HPDF_COUNTRY_AZ ="AZ" # AZERBAIJAN
HPDF_COUNTRY_BS ="BS" # BAHAMAS
HPDF_COUNTRY_BH ="BH" # BAHRAIN
HPDF_COUNTRY_BD ="BD" # BANGLADESH
HPDF_COUNTRY_BB ="BB" # BARBADOS
HPDF_COUNTRY_BY ="BY" # BELARUS
HPDF_COUNTRY_BE ="BE" # BELGIUM
HPDF_COUNTRY_BZ ="BZ" # BELIZE
HPDF_COUNTRY_BJ ="BJ" # BENIN
HPDF_COUNTRY_BM ="BM" # BERMUDA
HPDF_COUNTRY_BT ="BT" # BHUTAN
HPDF_COUNTRY_BO ="BO" # BOLIVIA
HPDF_COUNTRY_BA ="BA" # BOSNIA AND HERZEGOWINA
HPDF_COUNTRY_BW ="BW" # BOTSWANA
HPDF_COUNTRY_BV ="BV" # BOUVET ISLAND
HPDF_COUNTRY_BR ="BR" # BRAZIL
HPDF_COUNTRY_IO ="IO" # BRITISH INDIAN OCEAN TERRITORY
HPDF_COUNTRY_BN ="BN" # BRUNEI DARUSSALAM
HPDF_COUNTRY_BG ="BG" # BULGARIA
HPDF_COUNTRY_BF ="BF" # BURKINA FASO
HPDF_COUNTRY_BI ="BI" # BURUNDI
HPDF_COUNTRY_KH ="KH" # CAMBODIA
HPDF_COUNTRY_CM ="CM" # CAMEROON
HPDF_COUNTRY_CA ="CA" # CANADA
HPDF_COUNTRY_CV ="CV" # CAPE VERDE
HPDF_COUNTRY_KY ="KY" # CAYMAN ISLANDS
HPDF_COUNTRY_CF ="CF" # CENTRAL AFRICAN REPUBLIC
HPDF_COUNTRY_TD ="TD" # CHAD
HPDF_COUNTRY_CL ="CL" # CHILE
HPDF_COUNTRY_CN ="CN" # CHINA
HPDF_COUNTRY_CX ="CX" # CHRISTMAS ISLAND
HPDF_COUNTRY_CC ="CC" # COCOS (KEELING) ISLANDS
HPDF_COUNTRY_CO ="CO" # COLOMBIA
HPDF_COUNTRY_KM ="KM" # COMOROS
HPDF_COUNTRY_CG ="CG" # CONGO
HPDF_COUNTRY_CK ="CK" # COOK ISLANDS
HPDF_COUNTRY_CR ="CR" # COSTA RICA
HPDF_COUNTRY_CI ="CI" # COTE D'IVOIRE
HPDF_COUNTRY_HR ="HR" # CROATIA (local name: Hrvatska)
HPDF_COUNTRY_CU ="CU" # CUBA
HPDF_COUNTRY_CY ="CY" # CYPRUS
HPDF_COUNTRY_CZ ="CZ" # CZECH REPUBLIC
HPDF_COUNTRY_DK ="DK" # DENMARK
HPDF_COUNTRY_DJ ="DJ" # DJIBOUTI
HPDF_COUNTRY_DM ="DM" # DOMINICA
HPDF_COUNTRY_DO ="DO" # DOMINICAN REPUBLIC
HPDF_COUNTRY_TP ="TP" # EAST TIMOR
HPDF_COUNTRY_EC ="EC" # ECUADOR
HPDF_COUNTRY_EG ="EG" # EGYPT
HPDF_COUNTRY_SV ="SV" # EL SALVADOR
HPDF_COUNTRY_GQ ="GQ" # EQUATORIAL GUINEA
HPDF_COUNTRY_ER ="ER" # ERITREA
HPDF_COUNTRY_EE ="EE" # ESTONIA
HPDF_COUNTRY_ET ="ET" # ETHIOPIA
HPDF_COUNTRY_FK ="FK" # FALKLAND ISLANDS (MALVINAS)
HPDF_COUNTRY_FO ="FO" # FAROE ISLANDS
HPDF_COUNTRY_FJ ="FJ" # FIJI
HPDF_COUNTRY_FI ="FI" # FINLAND
HPDF_COUNTRY_FR ="FR" # FRANCE
HPDF_COUNTRY_FX ="FX" # FRANCE, METROPOLITAN
HPDF_COUNTRY_GF ="GF" # FRENCH GUIANA
HPDF_COUNTRY_PF ="PF" # FRENCH POLYNESIA
HPDF_COUNTRY_TF ="TF" # FRENCH SOUTHERN TERRITORIES
HPDF_COUNTRY_GA ="GA" # GABON
HPDF_COUNTRY_GM ="GM" # GAMBIA
HPDF_COUNTRY_GE ="GE" # GEORGIA
HPDF_COUNTRY_DE ="DE" # GERMANY
HPDF_COUNTRY_GH ="GH" # GHANA
HPDF_COUNTRY_GI ="GI" # GIBRALTAR
HPDF_COUNTRY_GR ="GR" # GREECE
HPDF_COUNTRY_GL ="GL" # GREENLAND
HPDF_COUNTRY_GD ="GD" # GRENADA
HPDF_COUNTRY_GP ="GP" # GUADELOUPE
HPDF_COUNTRY_GU ="GU" # GUAM
HPDF_COUNTRY_GT ="GT" # GUATEMALA
HPDF_COUNTRY_GN ="GN" # GUINEA
HPDF_COUNTRY_GW ="GW" # GUINEA-BISSAU
HPDF_COUNTRY_GY ="GY" # GUYANA
HPDF_COUNTRY_HT ="HT" # HAITI
HPDF_COUNTRY_HM ="HM" # HEARD AND MC DONALD ISLANDS
HPDF_COUNTRY_HN ="HN" # HONDURAS
HPDF_COUNTRY_HK ="HK" # HONG KONG
HPDF_COUNTRY_HU ="HU" # HUNGARY
HPDF_COUNTRY_IS ="IS" # ICELAND
HPDF_COUNTRY_IN ="IN" # INDIA
HPDF_COUNTRY_ID ="ID" # INDONESIA
HPDF_COUNTRY_IR ="IR" # IRAN (ISLAMIC REPUBLIC OF)
HPDF_COUNTRY_IQ ="IQ" # IRAQ
HPDF_COUNTRY_IE ="IE" # IRELAND
HPDF_COUNTRY_IL ="IL" # ISRAEL
HPDF_COUNTRY_IT ="IT" # ITALY
HPDF_COUNTRY_JM ="JM" # JAMAICA
HPDF_COUNTRY_JP ="JP" # JAPAN
HPDF_COUNTRY_JO ="JO" # JORDAN
HPDF_COUNTRY_KZ ="KZ" # KAZAKHSTAN
HPDF_COUNTRY_KE ="KE" # KENYA
HPDF_COUNTRY_KI ="KI" # KIRIBATI
HPDF_COUNTRY_KP ="KP" # KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF
HPDF_COUNTRY_KR ="KR" # KOREA, REPUBLIC OF
HPDF_COUNTRY_KW ="KW" # KUWAIT
HPDF_COUNTRY_KG ="KG" # KYRGYZSTAN
HPDF_COUNTRY_LA ="LA" # LAO PEOPLE'S DEMOCRATIC REPUBLIC
HPDF_COUNTRY_LV ="LV" # LATVIA
HPDF_COUNTRY_LB ="LB" # LEBANON
HPDF_COUNTRY_LS ="LS" # LESOTHO
HPDF_COUNTRY_LR ="LR" # LIBERIA
HPDF_COUNTRY_LY ="LY" # LIBYAN ARAB JAMAHIRIYA
HPDF_COUNTRY_LI ="LI" # LIECHTENSTEIN
HPDF_COUNTRY_LT ="LT" # LITHUANIA
HPDF_COUNTRY_LU ="LU" # LUXEMBOURG
HPDF_COUNTRY_MO ="MO" # MACAU
HPDF_COUNTRY_MK ="MK" # MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF
HPDF_COUNTRY_MG ="MG" # MADAGASCAR
HPDF_COUNTRY_MW ="MW" # MALAWI
HPDF_COUNTRY_MY ="MY" # MALAYSIA
HPDF_COUNTRY_MV ="MV" # MALDIVES
HPDF_COUNTRY_ML ="ML" # MALI
HPDF_COUNTRY_MT ="MT" # MALTA
HPDF_COUNTRY_MH ="MH" # MARSHALL ISLANDS
HPDF_COUNTRY_MQ ="MQ" # MARTINIQUE
HPDF_COUNTRY_MR ="MR" # MAURITANIA
HPDF_COUNTRY_MU ="MU" # MAURITIUS
HPDF_COUNTRY_YT ="YT" # MAYOTTE
HPDF_COUNTRY_MX ="MX" # MEXICO
HPDF_COUNTRY_FM ="FM" # MICRONESIA, FEDERATED STATES OF
HPDF_COUNTRY_MD ="MD" # MOLDOVA, REPUBLIC OF
HPDF_COUNTRY_MC ="MC" # MONACO
HPDF_COUNTRY_MN ="MN" # MONGOLIA
HPDF_COUNTRY_MS ="MS" # MONTSERRAT
HPDF_COUNTRY_MA ="MA" # MOROCCO
HPDF_COUNTRY_MZ ="MZ" # MOZAMBIQUE
HPDF_COUNTRY_MM ="MM" # MYANMAR
HPDF_COUNTRY_NA ="NA" # NAMIBIA
HPDF_COUNTRY_NR ="NR" # NAURU
HPDF_COUNTRY_NP ="NP" # NEPAL
HPDF_COUNTRY_NL ="NL" # NETHERLANDS
HPDF_COUNTRY_AN ="AN" # NETHERLANDS ANTILLES
HPDF_COUNTRY_NC ="NC" # NEW CALEDONIA
HPDF_COUNTRY_NZ ="NZ" # NEW ZEALAND
HPDF_COUNTRY_NI ="NI" # NICARAGUA
HPDF_COUNTRY_NE ="NE" # NIGER
HPDF_COUNTRY_NG ="NG" # NIGERIA
HPDF_COUNTRY_NU ="NU" # NIUE
HPDF_COUNTRY_NF ="NF" # NORFOLK ISLAND
HPDF_COUNTRY_MP ="MP" # NORTHERN MARIANA ISLANDS
HPDF_COUNTRY_NO ="NO" # NORWAY
HPDF_COUNTRY_OM ="OM" # OMAN
HPDF_COUNTRY_PK ="PK" # PAKISTAN
HPDF_COUNTRY_PW ="PW" # PALAU
HPDF_COUNTRY_PA ="PA" # PANAMA
HPDF_COUNTRY_PG ="PG" # PAPUA NEW GUINEA
HPDF_COUNTRY_PY ="PY" # PARAGUAY
HPDF_COUNTRY_PE ="PE" # PERU
HPDF_COUNTRY_PH ="PH" # PHILIPPINES
HPDF_COUNTRY_PN ="PN" # PITCAIRN
HPDF_COUNTRY_PL ="PL" # POLAND
HPDF_COUNTRY_PT ="PT" # PORTUGAL
HPDF_COUNTRY_PR ="PR" # PUERTO RICO
HPDF_COUNTRY_QA ="QA" # QATAR
HPDF_COUNTRY_RE ="RE" # REUNION
HPDF_COUNTRY_RO ="RO" # ROMANIA
HPDF_COUNTRY_RU ="RU" # RUSSIAN FEDERATION
HPDF_COUNTRY_RW ="RW" # RWANDA
HPDF_COUNTRY_KN ="KN" # SAINT KITTS AND NEVIS
HPDF_COUNTRY_LC ="LC" # SAINT LUCIA
HPDF_COUNTRY_VC ="VC" # SAINT VINCENT AND THE GRENADINES
HPDF_COUNTRY_WS ="WS" # SAMOA
HPDF_COUNTRY_SM ="SM" # SAN MARINO
HPDF_COUNTRY_ST ="ST" # SAO TOME AND PRINCIPE
HPDF_COUNTRY_SA ="SA" # SAUDI ARABIA
HPDF_COUNTRY_SN ="SN" # SENEGAL
HPDF_COUNTRY_SC ="SC" # SEYCHELLES
HPDF_COUNTRY_SL ="SL" # SIERRA LEONE
HPDF_COUNTRY_SG ="SG" # SINGAPORE
HPDF_COUNTRY_SK ="SK" # SLOVAKIA (Slovak Republic)
HPDF_COUNTRY_SI ="SI" # SLOVENIA
HPDF_COUNTRY_SB ="SB" # SOLOMON ISLANDS
HPDF_COUNTRY_SO ="SO" # SOMALIA
HPDF_COUNTRY_ZA ="ZA" # SOUTH AFRICA
HPDF_COUNTRY_ES ="ES" # SPAIN
HPDF_COUNTRY_LK ="LK" # SRI LANKA
HPDF_COUNTRY_SH ="SH" # ST. HELENA
HPDF_COUNTRY_PM ="PM" # ST. PIERRE AND MIQUELON
HPDF_COUNTRY_SD ="SD" # SUDAN
HPDF_COUNTRY_SR ="SR" # SURINAME
HPDF_COUNTRY_SJ ="SJ" # SVALBARD AND JAN MAYEN ISLANDS
HPDF_COUNTRY_SZ ="SZ" # SWAZILAND
HPDF_COUNTRY_SE ="SE" # SWEDEN
HPDF_COUNTRY_CH ="CH" # SWITZERLAND
HPDF_COUNTRY_SY ="SY" # SYRIAN ARAB REPUBLIC
HPDF_COUNTRY_TW ="TW" # TAIWAN, PROVINCE OF CHINA
HPDF_COUNTRY_TJ ="TJ" # TAJIKISTAN
HPDF_COUNTRY_TZ ="TZ" # TANZANIA, UNITED REPUBLIC OF
HPDF_COUNTRY_TH ="TH" # THAILAND
HPDF_COUNTRY_TG ="TG" # TOGO
HPDF_COUNTRY_TK ="TK" # TOKELAU
HPDF_COUNTRY_TO ="TO" # TONGA
HPDF_COUNTRY_TT ="TT" # TRINIDAD AND TOBAGO
HPDF_COUNTRY_TN ="TN" # TUNISIA
HPDF_COUNTRY_TR ="TR" # TURKEY
HPDF_COUNTRY_TM ="TM" # TURKMENISTAN
HPDF_COUNTRY_TC ="TC" # TURKS AND CAICOS ISLANDS
HPDF_COUNTRY_TV ="TV" # TUVALU
HPDF_COUNTRY_UG ="UG" # UGANDA
HPDF_COUNTRY_UA ="UA" # UKRAINE
HPDF_COUNTRY_AE ="AE" # UNITED ARAB EMIRATES
HPDF_COUNTRY_GB ="GB" # UNITED KINGDOM
HPDF_COUNTRY_US ="US" # UNITED STATES
HPDF_COUNTRY_UM ="UM" # UNITED STATES MINOR OUTLYING ISLANDS
HPDF_COUNTRY_UY ="UY" # URUGUAY
HPDF_COUNTRY_UZ ="UZ" # UZBEKISTAN
HPDF_COUNTRY_VU ="VU" # VANUATU
HPDF_COUNTRY_VA ="VA" # VATICAN CITY STATE (HOLY SEE)
HPDF_COUNTRY_VE ="VE" # VENEZUELA
HPDF_COUNTRY_VN ="VN" # VIET NAM
HPDF_COUNTRY_VG ="VG" # VIRGIN ISLANDS (BRITISH)
HPDF_COUNTRY_VI ="VI" # VIRGIN ISLANDS (U.S.)
HPDF_COUNTRY_WF ="WF" # WALLIS AND FUTUNA ISLANDS
HPDF_COUNTRY_EH ="EH" # WESTERN SAHARA
HPDF_COUNTRY_YE ="YE" # YEMEN
HPDF_COUNTRY_YU ="YU" # YUGOSLAVIA
HPDF_COUNTRY_ZR ="ZR" # ZAIRE
HPDF_COUNTRY_ZM ="ZM" # ZAMBIA
HPDF_COUNTRY_ZW ="ZW" # ZIMBABWE
#----------------------------------------------------------------------------
#----- lang code definition -------------------------------------------------
HPDF_LANG_AA ="aa" # Afar
HPDF_LANG_AB ="ab" # Abkhazian
HPDF_LANG_AF ="af" # Afrikaans
HPDF_LANG_AM ="am" # Amharic
HPDF_LANG_AR ="ar" # Arabic
HPDF_LANG_AS ="as" # Assamese
HPDF_LANG_AY ="ay" # Aymara
HPDF_LANG_AZ ="az" # Azerbaijani
HPDF_LANG_BA ="ba" # Bashkir
HPDF_LANG_BE ="be" # Byelorussian
HPDF_LANG_BG ="bg" # Bulgarian
HPDF_LANG_BH ="bh" # Bihari
HPDF_LANG_BI ="bi" # Bislama
HPDF_LANG_BN ="bn" # Bengali Bangla
HPDF_LANG_BO ="bo" # Tibetan
HPDF_LANG_BR ="br" # Breton
HPDF_LANG_CA ="ca" # Catalan
HPDF_LANG_CO ="co" # Corsican
HPDF_LANG_CS ="cs" # Czech
HPDF_LANG_CY ="cy" # Welsh
HPDF_LANG_DA ="da" # Danish
HPDF_LANG_DE ="de" # German
HPDF_LANG_DZ ="dz" # Bhutani
HPDF_LANG_EL ="el" # Greek
HPDF_LANG_EN ="en" # English
HPDF_LANG_EO ="eo" # Esperanto
HPDF_LANG_ES ="es" # Spanish
HPDF_LANG_ET ="et" # Estonian
HPDF_LANG_EU ="eu" # Basque
HPDF_LANG_FA ="fa" # Persian
HPDF_LANG_FI ="fi" # Finnish
HPDF_LANG_FJ ="fj" # Fiji
HPDF_LANG_FO ="fo" # Faeroese
HPDF_LANG_FR ="fr" # French
HPDF_LANG_FY ="fy" # Frisian
HPDF_LANG_GA ="ga" # Irish
HPDF_LANG_GD ="gd" # Scots Gaelic
HPDF_LANG_GL ="gl" # Galician
HPDF_LANG_GN ="gn" # Guarani
HPDF_LANG_GU ="gu" # Gujarati
HPDF_LANG_HA ="ha" # Hausa
HPDF_LANG_HI ="hi" # Hindi
HPDF_LANG_HR ="hr" # Croatian
HPDF_LANG_HU ="hu" # Hungarian
HPDF_LANG_HY ="hy" # Armenian
HPDF_LANG_IA ="ia" # Interlingua
HPDF_LANG_IE ="ie" # Interlingue
HPDF_LANG_IK ="ik" # Inupiak
HPDF_LANG_IN ="in" # Indonesian
HPDF_LANG_IS ="is" # Icelandic
HPDF_LANG_IT ="it" # Italian
HPDF_LANG_IW ="iw" # Hebrew
HPDF_LANG_JA ="ja" # Japanese
HPDF_LANG_JI ="ji" # Yiddish
HPDF_LANG_JW ="jw" # Javanese
HPDF_LANG_KA ="ka" # Georgian
HPDF_LANG_KK ="kk" # Kazakh
HPDF_LANG_KL ="kl" # Greenlandic
HPDF_LANG_KM ="km" # Cambodian
HPDF_LANG_KN ="kn" # Kannada
HPDF_LANG_KO ="ko" # Korean
HPDF_LANG_KS ="ks" # Kashmiri
HPDF_LANG_KU ="ku" # Kurdish
HPDF_LANG_KY ="ky" # Kirghiz
HPDF_LANG_LA ="la" # Latin
HPDF_LANG_LN ="ln" # Lingala
HPDF_LANG_LO ="lo" # Laothian
HPDF_LANG_LT ="lt" # Lithuanian
HPDF_LANG_LV ="lv" # Latvian,Lettish
HPDF_LANG_MG ="mg" # Malagasy
HPDF_LANG_MI ="mi" # Maori
HPDF_LANG_MK ="mk" # Macedonian
HPDF_LANG_ML ="ml" # Malayalam
HPDF_LANG_MN ="mn" # Mongolian
HPDF_LANG_MO ="mo" # Moldavian
HPDF_LANG_MR ="mr" # Marathi
HPDF_LANG_MS ="ms" # Malay
HPDF_LANG_MT ="mt" # Maltese
HPDF_LANG_MY ="my" # Burmese
HPDF_LANG_NA ="na" # Nauru
HPDF_LANG_NE ="ne" # Nepali
HPDF_LANG_NL ="nl" # Dutch
HPDF_LANG_NO ="no" # Norwegian
HPDF_LANG_OC ="oc" # Occitan
HPDF_LANG_OM ="om" # (Afan)Oromo
HPDF_LANG_OR ="or" # Oriya
HPDF_LANG_PA ="pa" # Punjabi
HPDF_LANG_PL ="pl" # Polish
HPDF_LANG_PS ="ps" # Pashto,Pushto
HPDF_LANG_PT ="pt" # Portuguese
HPDF_LANG_QU ="qu" # Quechua
HPDF_LANG_RM ="rm" # Rhaeto-Romance
HPDF_LANG_RN ="rn" # Kirundi
HPDF_LANG_RO ="ro" # Romanian
HPDF_LANG_RU ="ru" # Russian
HPDF_LANG_RW ="rw" # Kinyarwanda
HPDF_LANG_SA ="sa" # Sanskrit
HPDF_LANG_SD ="sd" # Sindhi
HPDF_LANG_SG ="sg" # Sangro
HPDF_LANG_SH ="sh" # Serbo-Croatian
HPDF_LANG_SI ="si" # Singhalese
HPDF_LANG_SK ="sk" # Slovak
HPDF_LANG_SL ="sl" # Slovenian
HPDF_LANG_SM ="sm" # Samoan
HPDF_LANG_SN ="sn" # Shona
HPDF_LANG_SO ="so" # Somali
HPDF_LANG_SQ ="sq" # Albanian
HPDF_LANG_SR ="sr" # Serbian
HPDF_LANG_SS ="ss" # Siswati
HPDF_LANG_ST ="st" # Sesotho
HPDF_LANG_SU ="su" # Sundanese
HPDF_LANG_SV ="sv" # Swedish
HPDF_LANG_SW ="sw" # Swahili
HPDF_LANG_TA ="ta" # Tamil
HPDF_LANG_TE ="te" # Tegulu
HPDF_LANG_TG ="tg" # Tajik
HPDF_LANG_TH ="th" # Thai
HPDF_LANG_TI ="ti" # Tigrinya
HPDF_LANG_TK ="tk" # Turkmen
HPDF_LANG_TL ="tl" # Tagalog
HPDF_LANG_TN ="tn" # Setswanato Tonga
HPDF_LANG_TR ="tr" # Turkish
HPDF_LANG_TS ="ts" # Tsonga
HPDF_LANG_TT ="tt" # Tatar
HPDF_LANG_TW ="tw" # Twi
HPDF_LANG_UK ="uk" # Ukrainian
HPDF_LANG_UR ="ur" # Urdu
HPDF_LANG_UZ ="uz" # Uzbek
HPDF_LANG_VI ="vi" # Vietnamese
HPDF_LANG_VO ="vo" # Volapuk
HPDF_LANG_WO ="wo" # Wolof
HPDF_LANG_XH ="xh" # Xhosa
HPDF_LANG_YO ="yo" # Yoruba
HPDF_LANG_ZH ="zh" # Chinese
HPDF_LANG_ZU ="zu" # Zulu
#----------------------------------------------------------------------------
#----- Graphics mode --------------------------------------------------------
HPDF_GMODE_PAGE_DESCRIPTION =0x0001
HPDF_GMODE_PATH_OBJECT =0x0002
HPDF_GMODE_TEXT_OBJECT =0x0004
HPDF_GMODE_CLIPPING_PATH =0x0008
HPDF_GMODE_SHADING =0x0010
HPDF_GMODE_INLINE_IMAGE =0x0020
HPDF_GMODE_EXTERNAL_OBJECT =0x0040
| gpl-3.0 |
rebeling/pattern | pattern/server/cherrypy/cherrypy/lib/xmlrpcutil.py | 123 | 1606 | import sys
import cherrypy
from cherrypy._cpcompat import ntob
def get_xmlrpclib():
    """Return the XML-RPC client library.

    Prefers the Python 3 ``xmlrpc.client`` module and falls back to the
    Python 2 ``xmlrpclib`` name.
    """
    try:
        import xmlrpc.client as lib
    except ImportError:
        import xmlrpclib as lib
    return lib
def process_body():
    """Return (params, method) parsed from the XML-RPC request body."""
    try:
        payload = cherrypy.request.body.read()
        return get_xmlrpclib().loads(payload)
    except Exception:
        # any read/parse failure maps to sentinel values instead of raising
        return ('ERROR PARAMS', ), 'ERRORMETHOD'
def patched_path(path):
    """Return 'path', doctored for RPC."""
    # normalize to a trailing slash, then strip a leading /RPC2 mount prefix
    normalized = path if path.endswith('/') else path + '/'
    if normalized.startswith('/RPC2/'):
        # drop the leading '/RPC2' (5 characters), keeping the slash after it
        normalized = normalized[5:]
    return normalized
def _set_response(body):
    """Populate cherrypy.response with ``body`` as a UTF-8 XML payload.

    The XML-RPC spec (http://www.xmlrpc.com/spec) says:
    "Unless there's a lower-level error, always return 200 OK."
    Since Python's xmlrpclib interprets a non-200 response
    as a "Protocol Error", we'll just return 200 every time.
    """
    response = cherrypy.response
    response.status = '200 OK'
    response.headers['Content-Type'] = 'text/xml'
    response.headers['Content-Length'] = len(body)
    response.body = ntob(body, 'utf-8')
def respond(body, encoding='utf-8', allow_none=0):
    """Marshal ``body`` as an XML-RPC methodResponse into the response."""
    xmlrpclib = get_xmlrpclib()
    # non-Fault results must be wrapped in a 1-tuple for dumps()
    if not isinstance(body, xmlrpclib.Fault):
        body = (body,)
    payload = xmlrpclib.dumps(body, methodresponse=1,
                              encoding=encoding,
                              allow_none=allow_none)
    _set_response(payload)
def on_error(*args, **kwargs):
    """Error handler: marshal the in-flight exception as an XML-RPC Fault."""
    message = str(sys.exc_info()[1])
    xmlrpclib = get_xmlrpclib()
    _set_response(xmlrpclib.dumps(xmlrpclib.Fault(1, message)))
| bsd-3-clause |
dbentley/pants | tests/python/pants_test/tasks/test_execution_graph.py | 23 | 10773 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (ExecutionFailure, ExecutionGraph,
Job, JobExistsError,
NoRootJobError, UnknownJobError)
class ImmediatelyExecutingPool(object):
  """Fake worker pool that runs submitted work synchronously, inline."""
  num_workers = 1

  def submit_async_work(self, work):
    # execute the first args tuple immediately instead of scheduling it
    work.func(*work.args_tuples[0])
class PrintLogger(object):
  """Minimal logger stand-in that writes every message to stdout."""

  def error(self, msg):
    print(msg)

  def debug(self, msg):
    print(msg)
def passing_fn():
  """A job body that succeeds by doing nothing."""
  return None
def raising_fn():
  """A job body that always fails."""
  raise Exception("I'm an error")
class ExecutionGraphTest(unittest.TestCase):
  def setUp(self):
    # records the names of jobs in the order they actually execute
    self.jobs_run = []
  def execute(self, exec_graph):
    # run the graph on a synchronous pool so execution order is deterministic
    exec_graph.execute(ImmediatelyExecutingPool(), PrintLogger())
def job(self, name, fn, dependencies, size=0, on_success=None, on_failure=None):
def recording_fn():
self.jobs_run.append(name)
fn()
return Job(name, recording_fn, dependencies, size, on_success, on_failure)
  def test_single_job(self):
    # a graph with a single root job runs exactly that job
    exec_graph = ExecutionGraph([self.job("A", passing_fn, [])])
    self.execute(exec_graph)
    self.assertEqual(self.jobs_run, ["A"])
  def test_single_dependency(self):
    # a dependency runs before its dependent
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
                                 self.job("B", passing_fn, [])])
    self.execute(exec_graph)
    self.assertEqual(self.jobs_run, ["B", "A"])
  def test_simple_binary_tree(self):
    # both children of A run (in declaration order) before A itself
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
                                 self.job("B", passing_fn, []),
                                 self.job("C", passing_fn, [])])
    self.execute(exec_graph)
    self.assertEqual(self.jobs_run, ["B", "C", "A"])
  def test_simple_linear_dependencies(self):
    # a chain A -> B -> C executes bottom-up
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
                                 self.job("B", passing_fn, ["C"]),
                                 self.job("C", passing_fn, [])])
    self.execute(exec_graph)
    self.assertEqual(self.jobs_run, ["C", "B", "A"])
  def test_simple_unconnected(self):
    # independent jobs all run, in declaration order
    exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
                                 self.job("B", passing_fn, []),
                                 ])
    self.execute(exec_graph)
    self.assertEqual(self.jobs_run, ["A", "B"])
  def test_simple_unconnected_tree(self):
    # the disconnected job C runs alongside the B -> A chain
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
                                 self.job("B", passing_fn, []),
                                 self.job("C", passing_fn, []),
                                 ])
    self.execute(exec_graph)
    self.assertEqual(self.jobs_run, ["B", "C", "A"])
  def test_dependee_depends_on_dependency_of_its_dependency(self):
    # C is needed by both A and B, but must run only once, before both
    exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
                                 self.job("B", passing_fn, ["C"]),
                                 self.job("C", passing_fn, []),
                                 ])
    self.execute(exec_graph)
    self.assertEqual(["C", "B", "A"], self.jobs_run)
def test_one_failure_raises_exception(self):
exec_graph = ExecutionGraph([self.job("A", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Failed jobs: A", str(cm.exception))
def test_failure_of_dependency_does_not_run_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["F"]),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["F"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_dependency_does_not_run_second_order_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["F"]),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["F"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_one_leg_of_tree_does_not_cancel_other(self):
# TODO do we want this behavior, or do we want to fail fast on the first failed job?
exec_graph = ExecutionGraph([self.job("B", passing_fn, []),
self.job("F", raising_fn, ["B"]),
self.job("A", passing_fn, ["B"])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertTrue(self.jobs_run == ["B", "F", "A"] or self.jobs_run == ["B", "A", "F"])
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_disconnected_job_does_not_cancel_non_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure):
self.execute(exec_graph)
self.assertEqual(["A", "F"], self.jobs_run)
def test_cycle_in_graph_causes_failure(self):
with self.assertRaises(NoRootJobError) as cm:
ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["A"])])
self.assertEqual(
"Unexecutable graph: All scheduled jobs have dependencies. "
"There must be a circular dependency.",
str(cm.exception))
def test_non_existent_dependency_causes_failure(self):
with self.assertRaises(UnknownJobError) as cm:
ExecutionGraph([self.job("A", passing_fn, []),
self.job("B", passing_fn, ["Z"])])
self.assertEqual("Unexecutable graph: Undefined dependencies u'Z'", str(cm.exception))
def test_on_success_callback_raises_error(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], on_success=raising_fn)])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Error in on_success for A: I'm an error", str(cm.exception))
def test_on_failure_callback_raises_error(self):
exec_graph = ExecutionGraph([self.job("A", raising_fn, [], on_failure=raising_fn)])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Error in on_failure for A: I'm an error", str(cm.exception))
def test_same_key_scheduled_twice_is_error(self):
with self.assertRaises(JobExistsError) as cm:
ExecutionGraph([self.job("Same", passing_fn, []),
self.job("Same", passing_fn, [])])
self.assertEqual("Unexecutable graph: Job already scheduled u'Same'", str(cm.exception))
def test_priorities_for_chain_of_jobs(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
self.job("B", passing_fn, ["A"], 4),
self.job("C", passing_fn, ["B"], 2),
self.job("D", passing_fn, ["C"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 15, "B": 7, "C": 3, "D": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
def test_priorities_for_fork(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 4),
self.job("B", passing_fn, ["A"], 2),
self.job("C", passing_fn, ["A"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 2, "C": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B", "C"])
def test_priorities_for_mirrored_fork(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 4),
self.job("B", passing_fn, ["A"], 1),
self.job("C", passing_fn, ["A"], 2)])
self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 1, "C": 2})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "C", "B"])
def test_priorities_for_diamond(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
self.job("B", passing_fn, ["A"], 4),
self.job("C", passing_fn, ["A"], 2),
self.job("D", passing_fn, ["B", "C"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 5, "C": 3, "D": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
def test_priorities_for_mirrored_diamond(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
self.job("B", passing_fn, ["A"], 2),
self.job("C", passing_fn, ["A"], 4),
self.job("D", passing_fn, ["B", "C"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 3, "C": 5, "D": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "C", "B", "D"])
def test_priorities_for_skewed_diamond(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 1),
self.job("B", passing_fn, ["A"], 2),
self.job("C", passing_fn, ["B"], 4),
self.job("D", passing_fn, ["A"], 8),
self.job("E", passing_fn, ["C", "D"], 16)])
self.assertEqual(exec_graph._job_priority, {"A": 25, "B": 22, "C": 20, "D": 24, "E": 16})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "D", "B", "C", "E"])
def test_jobs_not_canceled_multiple_times(self):
failures = list()
def collect_failure(jobname):
def fn():
failures.append(jobname)
return fn
def my_job(name, result_fn, deps):
return self.job(name, result_fn, deps, 1, on_failure=collect_failure(name))
exec_graph = ExecutionGraph([my_job('A', raising_fn, []),
my_job('B1', passing_fn, ['A']),
my_job('B2', passing_fn, ['A']),
my_job('C1', passing_fn, ['B1', 'B2']),
my_job('C2', passing_fn, ['B1', 'B2']),
my_job('E', passing_fn, ['C2'])])
with self.assertRaises(ExecutionFailure):
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ['A'])
self.assertEqual(failures, ['A', 'B1', 'B2', 'C1', 'C2', 'E'])
| apache-2.0 |
FlintHill/SUAS-Competition | env/lib/python3.7/site-packages/pip/_internal/req/constructors.py | 8 | 9573 | """Backing implementation for InstallRequirement's various constructors
The idea here is that these formed a major chunk of InstallRequirement's size
so, moving them and support code dedicated to them outside of that class
helps create better understandability for the rest of the code.
These are meant to be used elsewhere within pip to create instances of
InstallRequirement.
"""
import logging
import os
import re
import traceback
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.specifiers import Specifier
from pip._vendor.pkg_resources import RequirementParseError, parse_requirements
from pip._internal.download import (
is_archive_file, is_url, path_to_url, url_to_path,
)
from pip._internal.exceptions import InstallationError
from pip._internal.models.index import PyPI, TestPyPI
from pip._internal.models.link import Link
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.misc import is_installable_dir
from pip._internal.vcs import vcs
from pip._internal.wheel import Wheel
# Public API of this module.
__all__ = [
    "install_req_from_editable", "install_req_from_line",
    "parse_editable"
]
logger = logging.getLogger(__name__)
# Comparison operators recognized in requirement specifiers (==, >=, ~=, ...).
operators = Specifier._operators.keys()
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
def parse_editable(editable_req):
    """Parses an editable requirement into:
        - a requirement name
        - an URL
        - extras
        - editable options
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]

    Returns a (package_name, url, extras) tuple; package_name and extras may
    be None. Raises InstallationError for uninstallable or malformed input.
    """
    url = editable_req
    # If a file path is specified with extras, strip off the extras.
    url_no_extras, extras = _strip_extras(url)
    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError(
                "Directory %r is not installable. File 'setup.py' not found." %
                url_no_extras
            )
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)
    if url_no_extras.lower().startswith('file:'):
        package_name = Link(url_no_extras).egg_fragment
        if extras:
            # Parse the extras via a placeholder requirement to reuse
            # packaging's bracket syntax handling.
            return (
                package_name,
                url_no_extras,
                Requirement("placeholder" + extras.lower()).extras,
            )
        else:
            return package_name, url_no_extras, None
    # Prefix a bare VCS URL (e.g. "git:...") with its scheme ("git+git:...").
    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break
    if '+' not in url:
        raise InstallationError(
            '%s should either be a path to a local project or a VCS url '
            'beginning with svn+, git+, hg+, or bzr+' %
            editable_req
        )
    vc_type = url.split('+', 1)[0].lower()
    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)
    # Editable VCS requirements must name themselves via the #egg= fragment.
    package_name = Link(url).egg_fragment
    if not package_name:
        raise InstallationError(
            "Could not detect requirement name for '%s', please specify one "
            "with #egg=your_package_name" % editable_req
        )
    return package_name, url, None
def deduce_helpful_msg(req):
    """Returns helpful msg in case requirements file does not exist,
    or cannot be parsed.
    :params req: Requirements file path
    """
    msg = ""
    if os.path.exists(req):
        msg = " It does exist."
        # Try to parse and check if it is a requirements file.
        try:
            with open(req, 'r') as fp:
                # parse first line only
                next(parse_requirements(fp.read()))
            msg += " The argument you provided " + \
                "(%s) appears to be a" % (req) + \
                " requirements file. If that is the" + \
                " case, use the '-r' flag to install" + \
                " the packages specified within it."
        except RequirementParseError:
            # NOTE: the backslash-newline inside this string literal is a
            # line continuation, so the logged message contains the literal
            # indentation spaces before "file".
            logger.debug("Cannot parse '%s' as requirements \
            file" % (req), exc_info=1)
    else:
        msg += " File '%s' does not exist." % (req)
    return msg
# ---- The actual constructors follow ----
def install_req_from_editable(
    editable_req, comes_from=None, isolated=False, options=None,
    wheel_cache=None, constraint=False
):
    """Create an InstallRequirement (editable=True) from an editable spec.

    ``editable_req`` may be a local directory path or a VCS URL; see
    parse_editable for the accepted forms. Raises InstallationError when the
    detected requirement name is not a valid requirement.
    """
    name, url, extras_override = parse_editable(editable_req)
    if url.startswith('file:'):
        # Local checkout: record its directory as the source.
        source_dir = url_to_path(url)
    else:
        source_dir = None
    if name is not None:
        try:
            req = Requirement(name)
        except InvalidRequirement:
            raise InstallationError("Invalid requirement: '%s'" % name)
    else:
        req = None
    return InstallRequirement(
        req, comes_from, source_dir=source_dir,
        editable=True,
        link=Link(url),
        constraint=constraint,
        isolated=isolated,
        options=options if options else {},
        wheel_cache=wheel_cache,
        extras=extras_override or (),
    )
def install_req_from_line(
    name, comes_from=None, isolated=False, options=None, wheel_cache=None,
    constraint=False
):
    """Creates an InstallRequirement from a name, which might be a
    requirement, directory containing 'setup.py', filename, or URL.
    """
    # URLs may legitimately contain ';', so require '; ' as the marker
    # separator there; plain specifiers split on a bare ';'.
    if is_url(name):
        marker_sep = '; '
    else:
        marker_sep = ';'
    if marker_sep in name:
        name, markers = name.split(marker_sep, 1)
        markers = markers.strip()
        if not markers:
            markers = None
        else:
            markers = Marker(markers)
    else:
        markers = None
    name = name.strip()
    req = None
    path = os.path.normpath(os.path.abspath(name))
    link = None
    extras = None
    if is_url(name):
        link = Link(name)
    else:
        p, extras = _strip_extras(path)
        # Only treat it as a directory requirement when the user clearly
        # wrote a path (separator present or leading '.').
        looks_like_dir = os.path.isdir(p) and (
            os.path.sep in name or
            (os.path.altsep is not None and os.path.altsep in name) or
            name.startswith('.')
        )
        if looks_like_dir:
            if not is_installable_dir(p):
                raise InstallationError(
                    "Directory %r is not installable. Neither 'setup.py' "
                    "nor 'pyproject.toml' found." % name
                )
            link = Link(path_to_url(p))
        elif is_archive_file(p):
            if not os.path.isfile(p):
                logger.warning(
                    'Requirement %r looks like a filename, but the '
                    'file does not exist',
                    name
                )
            link = Link(path_to_url(p))
    # it's a local file, dir, or url
    if link:
        # Handle relative file URLs
        if link.scheme == 'file' and re.search(r'\.\./', link.url):
            link = Link(
                path_to_url(os.path.normpath(os.path.abspath(link.path))))
        # wheel file
        if link.is_wheel:
            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
            req = "%s==%s" % (wheel.name, wheel.version)
        else:
            # set the req to the egg fragment.  when it's not there, this
            # will become an 'unnamed' requirement
            req = link.egg_fragment
    # a requirement specifier
    else:
        req = name
    if extras:
        # Reuse packaging's extras parsing via a placeholder requirement.
        extras = Requirement("placeholder" + extras.lower()).extras
    else:
        extras = ()
    if req is not None:
        try:
            req = Requirement(req)
        except InvalidRequirement:
            # Build the most helpful error message we can for common typos.
            if os.path.sep in req:
                add_msg = "It looks like a path."
                add_msg += deduce_helpful_msg(req)
            elif '=' in req and not any(op in req for op in operators):
                add_msg = "= is not a valid operator. Did you mean == ?"
            else:
                add_msg = traceback.format_exc()
            raise InstallationError(
                "Invalid requirement: '%s'\n%s" % (req, add_msg)
            )
    return InstallRequirement(
        req, comes_from, link=link, markers=markers,
        isolated=isolated,
        options=options if options else {},
        wheel_cache=wheel_cache,
        constraint=constraint,
        extras=extras,
    )
def install_req_from_req(
    req, comes_from=None, isolated=False, wheel_cache=None
):
    """Create an InstallRequirement from a requirement-specifier string.

    Raises InstallationError when the specifier is invalid, or when a PyPI
    package declares a direct-URL dependency (disallowed for PyPI-hosted
    distributions).
    """
    try:
        req = Requirement(req)
    except InvalidRequirement:
        raise InstallationError("Invalid requirement: '%s'" % req)
    domains_not_allowed = [
        PyPI.file_storage_domain,
        TestPyPI.file_storage_domain,
    ]
    # NOTE(review): comes_from is dereferenced without a None check here;
    # presumably req.url is only set when comes_from is provided — confirm.
    if req.url and comes_from.link.netloc in domains_not_allowed:
        # Explicitly disallow pypi packages that depend on external urls
        raise InstallationError(
            "Packages installed from PyPI cannot depend on packages "
            "which are not also hosted on PyPI.\n"
            "%s depends on %s " % (comes_from.name, req)
        )
    return InstallRequirement(
        req, comes_from, isolated=isolated, wheel_cache=wheel_cache
    )
| mit |
tredavis/SoundprintOS | node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
	"""Return the character string for the given code point.

	Code points beyond the BMP are returned as an explicit UTF-16
	surrogate pair (see
	http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae),
	which also sidesteps narrow-build unichr() limits on Python 2.
	Returns the string 'Error' for out-of-range values.
	"""
	# Original code called unichr(), which does not exist on Python 3;
	# pick the right builtin once so the function works on both.
	try:
		_chr = unichr  # Python 2
	except NameError:
		_chr = chr  # Python 3
	if 0x0000 <= codePoint <= 0xFFFF:
		return _chr(codePoint)
	elif 0x010000 <= codePoint <= 0x10FFFF:
		# Split the supplementary-plane code point into a surrogate pair.
		highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
		lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
		return _chr(highSurrogate) + _chr(lowSurrogate)
	else:
		return 'Error'
def hexify(codePoint):
	"""Format a code point as 'U+' followed by at least six uppercase hex digits."""
	digits = hex(codePoint)[2:]
	return 'U+' + digits.upper().zfill(6)
def writeFile(filename, contents):
	"""Write contents to filename, normalized to end with exactly one newline.

	Echoes the filename to stdout as a progress indicator.
	"""
	# Original used the Python-2-only `print filename` statement, a syntax
	# error on Python 3; single-argument parenthesized print behaves
	# identically on both versions.
	print(filename)
	with open(filename, 'w') as f:
		f.write(contents.strip() + '\n')
# Build one record per Unicode code point: the code point number, its
# decoded character (with surrogate pairs beyond the BMP), and its UTF-8
# byte sequence exposed as latin-1 characters.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
	symbol = unisymbol(codePoint)
	# http://stackoverflow.com/a/17199950/96656
	# NOTE(review): `bytes` shadows the builtin of the same name; harmless
	# here since the builtin is not used afterwards.
	bytes = symbol.encode('utf8').decode('latin1')
	data.append({
		'codePoint': codePoint,
		'decoded': symbol,
		'encoded': bytes
	});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
	r'\\u([a-fA-F0-9]{4})',
	lambda match: r'\u{}'.format(match.group(1).upper()),
	jsonData
)
writeFile('data.json', jsonData)
| mit |
andurilhuang/Movie_Income_Prediction | paper/historycode/IMDbPY-5.1.1/imdb/linguistics.py | 76 | 9353 | """
linguistics module (imdb package).
This module provides functions and data to handle in a smart way
languages and articles (in various languages) at the beginning of movie titles.
Copyright 2009-2012 Davide Alberani <da@erlug.linux.it>
2012 Alberto Malagoli <albemala AT gmail.com>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the count of times they are used in a way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
            "l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
            'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
            'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
            'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
            '\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')
# Lists of articles separated by language.  If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# it at imdbpy-devel@lists.sourceforge.net; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
    'English': ('the', 'a', 'an'),
    'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
                'uno'),
    'Spanish': ('la', 'lo', 'el', 'las', 'un', 'los', 'una', 'al', 'del',
                'unos', 'unas', 'uno'),
    'French': ('le', "l'", 'la', 'les', 'un', 'une', 'des', 'au', 'du', '\xc3\xa0 la',
               'de la', 'aux'),
    'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
    'Turkish': (), # Some languages doesn't have articles.
}
# Bound-method alias: avoids repeated attribute lookups in the hot helpers below.
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# imdbpy-devel@lists.sourceforge.net .
LANG_COUNTRIES = {
    'English': ('Canada', 'Swaziland', 'Ghana', 'St. Lucia', 'Liberia', 'Jamaica', 'Bahamas', 'New Zealand', 'Lesotho', 'Kenya', 'Solomon Islands', 'United States', 'South Africa', 'St. Vincent and the Grenadines', 'Fiji', 'UK', 'Nigeria', 'Australia', 'USA', 'St. Kitts and Nevis', 'Belize', 'Sierra Leone', 'Gambia', 'Namibia', 'Micronesia', 'Kiribati', 'Grenada', 'Antigua and Barbuda', 'Barbados', 'Malta', 'Zimbabwe', 'Ireland', 'Uganda', 'Trinidad and Tobago', 'South Sudan', 'Guyana', 'Botswana', 'United Kingdom', 'Zambia'),
    'Italian': ('Italy', 'San Marino', 'Vatican City'),
    'Spanish': ('Spain', 'Mexico', 'Argentina', 'Bolivia', 'Guatemala', 'Uruguay', 'Peru', 'Cuba', 'Dominican Republic', 'Panama', 'Costa Rica', 'Ecuador', 'El Salvador', 'Chile', 'Equatorial Guinea', 'Spain', 'Colombia', 'Nicaragua', 'Venezuela', 'Honduras', 'Paraguay'),
    'French': ('Cameroon', 'Burkina Faso', 'Dominica', 'Gabon', 'Monaco', 'France', "Cote d'Ivoire", 'Benin', 'Togo', 'Central African Republic', 'Mali', 'Niger', 'Congo, Republic of', 'Guinea', 'Congo, Democratic Republic of the', 'Luxembourg', 'Haiti', 'Chad', 'Burundi', 'Madagascar', 'Comoros', 'Senegal'),
    'Portuguese': ('Portugal', 'Brazil', 'Sao Tome and Principe', 'Cape Verde', 'Angola', 'Mozambique', 'Guinea-Bissau'),
    'German': ('Liechtenstein', 'Austria', 'West Germany', 'Switzerland', 'East Germany', 'Germany'),
    'Arabic': ('Saudi Arabia', 'Kuwait', 'Jordan', 'Oman', 'Yemen', 'United Arab Emirates', 'Mauritania', 'Lebanon', 'Bahrain', 'Libya', 'Palestinian State (proposed)', 'Qatar', 'Algeria', 'Morocco', 'Iraq', 'Egypt', 'Djibouti', 'Sudan', 'Syria', 'Tunisia'),
    'Turkish': ('Turkey', 'Azerbaijan'),
    'Swahili': ('Tanzania',),
    'Swedish': ('Sweden',),
    'Icelandic': ('Iceland',),
    'Estonian': ('Estonia',),
    'Romanian': ('Romania',),
    'Samoan': ('Samoa',),
    'Slovenian': ('Slovenia',),
    'Tok Pisin': ('Papua New Guinea',),
    'Palauan': ('Palau',),
    'Macedonian': ('Macedonia',),
    'Hindi': ('India',),
    'Dutch': ('Netherlands', 'Belgium', 'Suriname'),
    'Marshallese': ('Marshall Islands',),
    'Korean': ('Korea, North', 'Korea, South', 'North Korea', 'South Korea'),
    'Vietnamese': ('Vietnam',),
    'Danish': ('Denmark',),
    'Khmer': ('Cambodia',),
    'Lao': ('Laos',),
    'Somali': ('Somalia',),
    'Filipino': ('Philippines',),
    'Hungarian': ('Hungary',),
    'Ukrainian': ('Ukraine',),
    'Bosnian': ('Bosnia and Herzegovina',),
    'Georgian': ('Georgia',),
    'Lithuanian': ('Lithuania',),
    'Malay': ('Brunei',),
    'Tetum': ('East Timor',),
    'Norwegian': ('Norway',),
    'Armenian': ('Armenia',),
    'Russian': ('Russia',),
    'Slovak': ('Slovakia',),
    'Thai': ('Thailand',),
    'Croatian': ('Croatia',),
    'Turkmen': ('Turkmenistan',),
    'Nepali': ('Nepal',),
    'Finnish': ('Finland',),
    'Uzbek': ('Uzbekistan',),
    'Albanian': ('Albania', 'Kosovo'),
    'Hebrew': ('Israel',),
    'Bulgarian': ('Bulgaria',),
    'Greek': ('Cyprus', 'Greece'),
    'Burmese': ('Myanmar',),
    'Latvian': ('Latvia',),
    'Serbian': ('Serbia',),
    'Afar': ('Eritrea',),
    'Catalan': ('Andorra',),
    'Chinese': ('China', 'Taiwan'),
    'Czech': ('Czech Republic', 'Czechoslovakia'),
    'Bislama': ('Vanuatu',),
    'Japanese': ('Japan',),
    'Kinyarwanda': ('Rwanda',),
    'Amharic': ('Ethiopia',),
    'Persian': ('Afghanistan', 'Iran'),
    'Tajik': ('Tajikistan',),
    'Mongolian': ('Mongolia',),
    'Dzongkha': ('Bhutan',),
    'Urdu': ('Pakistan',),
    'Polish': ('Poland',),
    'Sinhala': ('Sri Lanka',),
}
# Maps countries to their main language.
# Built by inverting LANG_COUNTRIES; a country appearing under several
# languages keeps the last one encountered (dict iteration order).
COUNTRY_LANG = {}
for lang in LANG_COUNTRIES:
    for country in LANG_COUNTRIES[lang]:
        COUNTRY_LANG[country] = lang
def toUnicode(articles):
    """Decode a sequence of utf-8 encoded articles into unicode strings."""
    decoded = [art.decode('utf_8') for art in articles]
    return tuple(decoded)
def toDicts(articles):
    """Given a list of utf-8 encoded articles, build two identity-mapping
    dictionaries (one with the encoded keys and one with unicode keys) for
    faster membership matches."""
    unicode_articles = toUnicode(articles)
    encoded_map = dict((article, article) for article in articles)
    unicode_map = dict((article, article) for article in unicode_articles)
    return encoded_map, unicode_map
def addTrailingSpace(articles):
    """From the given list of utf-8 encoded articles, return two lists
    (one utf-8 encoded, one unicode) with a trailing space appended to each
    article unless it already ends with an apostrophe or a hyphen."""
    encoded_articles = []
    unicode_articles = []
    for art in articles:
        if art[-1] not in ("'", '-'):
            art = art + ' '
        encoded_articles.append(art)
        unicode_articles.append(art.decode('utf_8'))
    return encoded_articles, unicode_articles
# Caches.
# Per-language memoization for the two lookup helpers below.
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
    """Return dictionaries of articles specific for the given language,
    falling back to the generic set when the language is unknown.
    Results are memoized in _ART_CACHE."""
    cached = _ART_CACHE.get(lang)
    if cached is not None:
        return cached
    artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
    _ART_CACHE[lang] = artDicts
    return artDicts
def spArticlesForLang(lang):
    """Return lists of articles (with an optional trailing space) specific
    for the given language, or the generic ones when the language is unknown.
    Results are memoized in _SP_ART_CACHE."""
    try:
        return _SP_ART_CACHE[lang]
    except KeyError:
        pass
    spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
    _SP_ART_CACHE[lang] = spArticles
    return spArticles
| mit |
sbidoul/buildbot | master/buildbot/test/unit/test_data_changes.py | 6 | 15376 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import mock
from twisted.internet import defer
from twisted.internet import task
from twisted.trial import unittest
from buildbot.data import changes
from buildbot.data import resultspec
from buildbot.process.users import users
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.util import epoch2datetime
class ChangeEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the single-change REST endpoint (/changes/<id>)."""
    endpointClass = changes.ChangeEndpoint
    resourceTypeClass = changes.Change
    def setUp(self):
        self.setUpEndpoint()
        # One known change (id 13) to retrieve in the tests below.
        self.db.insertTestData([
            fakedb.SourceStamp(id=234),
            fakedb.Change(changeid=13, branch=u'trunk', revision=u'9283',
                          repository=u'svn://...', codebase=u'cbsvn',
                          project=u'world-domination', sourcestampid=234),
        ])
    def tearDown(self):
        self.tearDownEndpoint()
    def test_get_existing(self):
        # Fetching a known change id returns valid, complete data.
        d = self.callGet(('changes', '13'))
        @d.addCallback
        def check(change):
            self.validateData(change)
            self.assertEqual(change['project'], 'world-domination')
        return d
    def test_get_missing(self):
        # Fetching an unknown change id yields None, not an error.
        d = self.callGet(('changes', '99'))
        @d.addCallback
        def check(change):
            self.assertEqual(change, None)
        return d
class ChangesEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the change-collection REST endpoint (/changes),
    including result-spec limit/offset/order handling."""
    endpointClass = changes.ChangesEndpoint
    resourceTypeClass = changes.Change
    def setUp(self):
        self.setUpEndpoint()
        # Two changes (ids 13 and 14) for list/ordering tests.
        self.db.insertTestData([
            fakedb.SourceStamp(id=133),
            fakedb.Change(changeid=13, branch=u'trunk', revision=u'9283',
                          repository=u'svn://...', codebase=u'cbsvn',
                          project=u'world-domination', sourcestampid=133),
            fakedb.SourceStamp(id=144),
            fakedb.Change(changeid=14, branch=u'devel', revision=u'9284',
                          repository=u'svn://...', codebase=u'cbsvn',
                          project=u'world-domination', sourcestampid=144),
        ])
    def tearDown(self):
        self.tearDownEndpoint()
    @defer.inlineCallbacks
    def test_get(self):
        changes = yield self.callGet(('changes',))
        self.validateData(changes[0])
        self.assertEqual(changes[0]['changeid'], 13)
        self.validateData(changes[1])
        self.assertEqual(changes[1]['changeid'], 14)
    @defer.inlineCallbacks
    def test_getRecentChanges(self):
        # limit is honored when ordering by -changeid.
        resultSpec = resultspec.ResultSpec(limit=1, order=['-changeid'])
        changes = yield self.callGet(('changes',), resultSpec=resultSpec)
        self.validateData(changes[0])
        self.assertEqual(changes[0]['changeid'], 14)
        self.assertEqual(len(changes), 1)
    @defer.inlineCallbacks
    def test_getChangesOtherOrder(self):
        resultSpec = resultspec.ResultSpec(limit=1, order=['-when_time_stamp'])
        changes = yield self.callGet(('changes',), resultSpec=resultSpec)
        # limit not implemented for other order
        self.assertEqual(len(changes), 2)
    @defer.inlineCallbacks
    def test_getChangesOtherOffset(self):
        resultSpec = resultspec.ResultSpec(
            limit=1, offset=1, order=['-changeid'])
        changes = yield self.callGet(('changes',), resultSpec=resultSpec)
        # limit not implemented for other offset
        self.assertEqual(len(changes), 2)
class Change(interfaces.InterfaceTests, unittest.TestCase):
changeEvent = {
'author': u'warner',
'branch': u'warnerdb',
'category': u'devel',
'codebase': u'',
'comments': u'fix whitespace',
'changeid': 500,
'files': [u'master/buildbot/__init__.py'],
'parent_changeids': [],
'project': u'Buildbot',
'properties': {u'foo': (20, u'Change')},
'repository': u'git://warner',
'revision': u'0e92a098b',
'revlink': u'http://warner/0e92a098b',
'when_timestamp': 256738404,
'sourcestamp': {
'branch': u'warnerdb',
'codebase': u'',
'patch': None,
'project': u'Buildbot',
'repository': u'git://warner',
'revision': u'0e92a098b',
'created_at': epoch2datetime(10000000),
'ssid': 100,
},
# uid
}
def setUp(self):
self.master = fakemaster.make_master(wantMq=True, wantDb=True,
wantData=True, testcase=self)
self.rtype = changes.Change(self.master)
def test_signature_addChange(self):
@self.assertArgSpecMatches(
self.master.data.updates.addChange, # fake
self.rtype.addChange) # real
def addChange(self, files=None, comments=None, author=None,
revision=None, when_timestamp=None, branch=None, category=None,
revlink=u'', properties=None, repository=u'', codebase=None,
project=u'', src=None):
pass
def do_test_addChange(self, kwargs,
expectedRoutingKey, expectedMessage, expectedRow,
expectedChangeUsers=[]):
clock = task.Clock()
clock.advance(10000000)
d = self.rtype.addChange(_reactor=clock, **kwargs)
def check(changeid):
self.assertEqual(changeid, 500)
# check the correct message was received
self.master.mq.assertProductions([
(expectedRoutingKey, expectedMessage),
])
# and that the correct data was inserted into the db
self.master.db.changes.assertChange(500, expectedRow)
self.master.db.changes.assertChangeUsers(500, expectedChangeUsers)
d.addCallback(check)
return d
def test_addChange(self):
# src and codebase are default here
kwargs = dict(author=u'warner', branch=u'warnerdb',
category=u'devel', comments=u'fix whitespace',
files=[u'master/buildbot/__init__.py'],
project=u'Buildbot', repository=u'git://warner',
revision=u'0e92a098b', revlink=u'http://warner/0e92a098b',
when_timestamp=256738404,
properties={u'foo': 20})
expectedRoutingKey = ('changes', '500', 'new')
expectedMessage = self.changeEvent
expectedRow = fakedb.Change(
changeid=500,
author='warner',
comments='fix whitespace',
branch='warnerdb',
revision='0e92a098b',
revlink='http://warner/0e92a098b',
when_timestamp=256738404,
category='devel',
repository='git://warner',
codebase='',
project='Buildbot',
sourcestampid=100,
)
return self.do_test_addChange(kwargs,
expectedRoutingKey, expectedMessage, expectedRow)
def test_addChange_src_codebase(self):
    """addChange with src= and codebase= set explicitly: the change row,
    routing key and message must carry the given codebase, and the user
    lookup must be performed with the given src."""
    # Stub users.createUserObject so the user lookup deterministically
    # yields uid 123 without touching the users table.
    createUserObject = mock.Mock(spec=users.createUserObject)
    createUserObject.return_value = defer.succeed(123)
    self.patch(users, 'createUserObject', createUserObject)
    kwargs = dict(author=u'warner', branch=u'warnerdb',
                  category=u'devel', comments=u'fix whitespace',
                  files=[u'master/buildbot/__init__.py'],
                  project=u'Buildbot', repository=u'git://warner',
                  revision=u'0e92a098b', revlink=u'http://warner/0e92a098b',
                  when_timestamp=256738404,
                  properties={u'foo': 20}, src=u'git', codebase=u'cb')
    expectedRoutingKey = ('changes', '500', 'new')
    # Full message expected on the mq for the new change; note the
    # explicit codebase 'cb' propagated to both the change and its
    # sourcestamp.
    expectedMessage = {
        'author': u'warner',
        'branch': u'warnerdb',
        'category': u'devel',
        'codebase': u'cb',
        'comments': u'fix whitespace',
        'changeid': 500,
        'files': [u'master/buildbot/__init__.py'],
        'parent_changeids': [],
        'project': u'Buildbot',
        'properties': {u'foo': (20, u'Change')},
        'repository': u'git://warner',
        'revision': u'0e92a098b',
        'revlink': u'http://warner/0e92a098b',
        'when_timestamp': 256738404,
        'sourcestamp': {
            'branch': u'warnerdb',
            'codebase': u'cb',
            'patch': None,
            'project': u'Buildbot',
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'created_at': epoch2datetime(10000000),
            'ssid': 100,
        },
        # uid
    }
    # Row expected to be inserted into the (fake) database.
    expectedRow = fakedb.Change(
        changeid=500,
        author='warner',
        comments='fix whitespace',
        branch='warnerdb',
        revision='0e92a098b',
        revlink='http://warner/0e92a098b',
        when_timestamp=256738404,
        category='devel',
        repository='git://warner',
        codebase='cb',
        project='Buildbot',
        sourcestampid=100,
    )
    d = self.do_test_addChange(kwargs,
                               expectedRoutingKey, expectedMessage, expectedRow,
                               expectedChangeUsers=[123])

    # After the change is added, verify the user lookup used the src
    # ('git') passed in kwargs.
    @d.addCallback
    def check(_):
        createUserObject.assert_called_once_with(
            self.master, 'warner', 'git')
    return d
def test_addChange_src_codebaseGenerator(self):
    """addChange with no explicit codebase: the configured
    codebaseGenerator must be invoked with the change dict and its return
    value used as the codebase ('cb-devel' here, derived from category)."""
    def preChangeGenerator(**kwargs):
        # identity pre-change hook: pass the change dict through untouched
        return kwargs
    self.master.config = mock.Mock(name='master.config')
    self.master.config.preChangeGenerator = preChangeGenerator
    # generator derives the codebase from the change's category
    self.master.config.codebaseGenerator = \
        lambda change: 'cb-%s' % change['category']
    kwargs = dict(author=u'warner', branch=u'warnerdb',
                  category=u'devel', comments=u'fix whitespace',
                  files=[u'master/buildbot/__init__.py'],
                  project=u'Buildbot', repository=u'git://warner',
                  revision=u'0e92a098b', revlink=u'http://warner/0e92a098b',
                  when_timestamp=256738404,
                  properties={u'foo': 20})
    expectedRoutingKey = ('changes', '500', 'new')
    # 'cb-devel' (generated) must appear both on the change and on its
    # sourcestamp.
    expectedMessage = {
        'author': u'warner',
        'branch': u'warnerdb',
        'category': u'devel',
        'codebase': u'cb-devel',
        'comments': u'fix whitespace',
        'changeid': 500,
        'files': [u'master/buildbot/__init__.py'],
        'parent_changeids': [],
        'project': u'Buildbot',
        'properties': {u'foo': (20, u'Change')},
        'repository': u'git://warner',
        'revision': u'0e92a098b',
        'revlink': u'http://warner/0e92a098b',
        'when_timestamp': 256738404,
        'sourcestamp': {
            'branch': u'warnerdb',
            'codebase': u'cb-devel',
            'patch': None,
            'project': u'Buildbot',
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'created_at': epoch2datetime(10000000),
            'ssid': 100,
        },
        # uid
    }
    expectedRow = fakedb.Change(
        changeid=500,
        author='warner',
        comments='fix whitespace',
        branch='warnerdb',
        revision='0e92a098b',
        revlink='http://warner/0e92a098b',
        when_timestamp=256738404,
        category='devel',
        repository='git://warner',
        codebase='cb-devel',
        project='Buildbot',
        sourcestampid=100,
    )
    return self.do_test_addChange(kwargs,
                                  expectedRoutingKey, expectedMessage, expectedRow)
def test_addChange_repository_revision(self):
    """addChange with repository+revision but no revlink: the revlink must
    be built by calling the configured ``revlink`` callable."""
    self.master.config = mock.Mock(name='master.config')
    # revlink callable receives (rev, repo) and builds the link string
    self.master.config.revlink = lambda rev, repo: 'foo%sbar%sbaz' % (repo, rev)
    # revlink is default here
    kwargs = dict(author=u'warner', branch=u'warnerdb',
                  category=u'devel', comments=u'fix whitespace',
                  files=[u'master/buildbot/__init__.py'],
                  project=u'Buildbot', repository=u'git://warner',
                  codebase=u'', revision=u'0e92a098b', when_timestamp=256738404,
                  properties={u'foo': 20})
    expectedRoutingKey = ('changes', '500', 'new')
    # When no revlink is passed to addChange, but a repository and revision is
    # passed, the revlink should be constructed by calling the revlink callable
    # in the config. We thus expect a revlink of 'foogit://warnerbar0e92a098bbaz'
    expectedMessage = {
        'author': u'warner',
        'branch': u'warnerdb',
        'category': u'devel',
        'codebase': u'',
        'comments': u'fix whitespace',
        'changeid': 500,
        'files': [u'master/buildbot/__init__.py'],
        'parent_changeids': [],
        'project': u'Buildbot',
        'properties': {u'foo': (20, u'Change')},
        'repository': u'git://warner',
        'revision': u'0e92a098b',
        'revlink': u'foogit://warnerbar0e92a098bbaz',
        'when_timestamp': 256738404,
        'sourcestamp': {
            'branch': u'warnerdb',
            'codebase': u'',
            'patch': None,
            'project': u'Buildbot',
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'created_at': epoch2datetime(10000000),
            'ssid': 100,
        },
        # uid
    }
    expectedRow = fakedb.Change(
        changeid=500,
        author='warner',
        comments='fix whitespace',
        branch='warnerdb',
        revision='0e92a098b',
        revlink='foogit://warnerbar0e92a098bbaz',
        when_timestamp=256738404,
        category='devel',
        repository='git://warner',
        codebase='',
        project='Buildbot',
        sourcestampid=100,
    )
    return self.do_test_addChange(kwargs,
                                  expectedRoutingKey, expectedMessage, expectedRow)
| gpl-2.0 |
rahuldan/sympy | sympy/polys/tests/test_solvers.py | 88 | 13327 | """Tests for low-level linear systems solver. """
from sympy.polys.rings import ring
from sympy.polys.fields import field
from sympy.polys.domains import ZZ, QQ
from sympy.polys.solvers import solve_lin_sys
def test_solve_lin_sys_2x2_one():
    """A consistent 2x2 system over QQ has a unique rational solution."""
    R, x1, x2 = ring("x1,x2", QQ)
    system = [x1 + x2 - 5,
              2*x1 - x2]
    expected = {x1: QQ(5, 3), x2: QQ(10, 3)}
    result = solve_lin_sys(system, R)
    assert result == expected
    # every key of the returned mapping is an element of the ring
    assert all(isinstance(s, R.dtype) for s in result)
def test_solve_lin_sys_2x4_none():
    """An inconsistent overdetermined 2-unknown / 4-equation system has no
    solution, which solve_lin_sys signals by returning None."""
    domain, x1, x2 = ring("x1,x2", QQ)
    eqs = [x1 - 1,
           x1 - x2,
           x1 - 2*x2,
           x2 - 1]
    # compare the None sentinel with ``is`` (identity), not ``==`` (PEP 8 E711)
    assert solve_lin_sys(eqs, domain) is None
def test_solve_lin_sys_3x4_one():
    """A homogeneous overdetermined system admits only the trivial solution."""
    R, x1, x2, x3 = ring("x1,x2,x3", QQ)
    system = [x1 + 2*x2 + 3*x3,
              2*x1 - x2 + x3,
              3*x1 + x2 + x3,
              5*x2 + 2*x3]
    trivial = {x1: 0, x2: 0, x3: 0}
    assert solve_lin_sys(system, R) == trivial
def test_solve_lin_sys_3x3_inf():
    """An underdetermined consistent system: pivots expressed in terms of
    the free variable x3."""
    R, x1, x2, x3 = ring("x1,x2,x3", QQ)
    system = [x1 - x2 + 2*x3 - 1,
              2*x1 + x2 + x3 - 8,
              x1 + x2 - 5]
    parametric = {x1: -x3 + 3, x2: x3 + 2}
    assert solve_lin_sys(system, R) == parametric
def test_solve_lin_sys_3x4_none():
    """An inconsistent 4-unknown / 3-equation system has no solution,
    which solve_lin_sys signals by returning None."""
    domain, x1, x2, x3, x4 = ring("x1,x2,x3,x4", QQ)
    eqs = [2*x1 + x2 + 7*x3 - 7*x4 - 2,
           -3*x1 + 4*x2 - 5*x3 - 6*x4 - 3,
           x1 + x2 + 4*x3 - 5*x4 - 2]
    # compare the None sentinel with ``is`` (identity), not ``==`` (PEP 8 E711)
    assert solve_lin_sys(eqs, domain) is None
def test_solve_lin_sys_4x7_inf():
    """4 equations in 7 unknowns: pivots x1, x3, x4 expressed in terms of
    the free variables x2, x5, x6, x7."""
    R, x1, x2, x3, x4, x5, x6, x7 = ring("x1,x2,x3,x4,x5,x6,x7", QQ)
    system = [x1 + 4*x2 - x4 + 7*x6 - 9*x7 - 3,
              2*x1 + 8*x2 - x3 + 3*x4 + 9*x5 - 13*x6 + 7*x7 - 9,
              2*x3 - 3*x4 - 4*x5 + 12*x6 - 8*x7 - 1,
              -x1 - 4*x2 + 2*x3 + 4*x4 + 8*x5 - 31*x6 + 37*x7 - 4]
    parametric = {x1: 4 - 4*x2 - 2*x5 - x6 + 3*x7,
                  x3: 2 - x5 + 3*x6 - 5*x7,
                  x4: 1 - 2*x5 + 6*x6 - 6*x7}
    assert solve_lin_sys(system, R) == parametric
def test_solve_lin_sys_5x5_inf():
    """5 equations in 5 unknowns with a rank deficiency: pivots x1, x3, x4
    expressed in terms of the free variables x2 and x5."""
    R, x1, x2, x3, x4, x5 = ring("x1,x2,x3,x4,x5", QQ)
    system = [x1 - x2 - 2*x3 + x4 + 11*x5 - 13,
              x1 - x2 + x3 + x4 + 5*x5 - 16,
              2*x1 - 2*x2 + x4 + 10*x5 - 21,
              2*x1 - 2*x2 - x3 + 3*x4 + 20*x5 - 38,
              2*x1 - 2*x2 + x3 + x4 + 8*x5 - 22]
    parametric = {x1: 6 + x2 - 3*x5,
                  x3: 1 + 2*x5,
                  x4: 9 - 4*x5}
    assert solve_lin_sys(system, R) == parametric
def test_solve_lin_sys_6x6_1():
    """Solve a 6x6 system whose coefficients live in a fraction field
    ZZ(d,r,e,g,i,j,l,o,m,p,q); the unknowns c,f,h,k,n,b are ring
    generators over that ground field. The expected solution entries are
    large rational functions in the ground-field symbols and must match
    solve_lin_sys exactly."""
    ground, d,r,e,g,i,j,l,o,m,p,q = field("d,r,e,g,i,j,l,o,m,p,q", ZZ)
    domain, c,f,h,k,n,b = ring("c,f,h,k,n,b", ground)
    # chain of balance equations; the last one couples only n and k
    eqs = [b + q/d - c/d, c*(1/d + 1/e + 1/g) - f/g - q/d, f*(1/g + 1/i + 1/j) - c/g - h/i, h*(1/i + 1/l + 1/m) - f/i - k/m, k*(1/m + 1/o + 1/p) - h/m - n/p, n/p - k/p]
    sol = {
        b: (e*i*l*q + e*i*m*q + e*i*o*q + e*j*l*q + e*j*m*q + e*j*o*q + e*l*m*q + e*l*o*q + g*i*l*q + g*i*m*q + g*i*o*q + g*j*l*q + g*j*m*q + g*j*o*q + g*l*m*q + g*l*o*q + i*j*l*q + i*j*m*q + i*j*o*q + j*l*m*q + j*l*o*q)/(-d*e*i*l - d*e*i*m - d*e*i*o - d*e*j*l - d*e*j*m - d*e*j*o - d*e*l*m - d*e*l*o - d*g*i*l - d*g*i*m - d*g*i*o - d*g*j*l - d*g*j*m - d*g*j*o - d*g*l*m - d*g*l*o - d*i*j*l - d*i*j*m - d*i*j*o - d*j*l*m - d*j*l*o - e*g*i*l - e*g*i*m - e*g*i*o - e*g*j*l - e*g*j*m - e*g*j*o - e*g*l*m - e*g*l*o - e*i*j*l - e*i*j*m - e*i*j*o - e*j*l*m - e*j*l*o),
        c: (-e*g*i*l*q - e*g*i*m*q - e*g*i*o*q - e*g*j*l*q - e*g*j*m*q - e*g*j*o*q - e*g*l*m*q - e*g*l*o*q - e*i*j*l*q - e*i*j*m*q - e*i*j*o*q - e*j*l*m*q - e*j*l*o*q)/(-d*e*i*l - d*e*i*m - d*e*i*o - d*e*j*l - d*e*j*m - d*e*j*o - d*e*l*m - d*e*l*o - d*g*i*l - d*g*i*m - d*g*i*o - d*g*j*l - d*g*j*m - d*g*j*o - d*g*l*m - d*g*l*o - d*i*j*l - d*i*j*m - d*i*j*o - d*j*l*m - d*j*l*o - e*g*i*l - e*g*i*m - e*g*i*o - e*g*j*l - e*g*j*m - e*g*j*o - e*g*l*m - e*g*l*o - e*i*j*l - e*i*j*m - e*i*j*o - e*j*l*m - e*j*l*o),
        f: (-e*i*j*l*q - e*i*j*m*q - e*i*j*o*q - e*j*l*m*q - e*j*l*o*q)/(-d*e*i*l - d*e*i*m - d*e*i*o - d*e*j*l - d*e*j*m - d*e*j*o - d*e*l*m - d*e*l*o - d*g*i*l - d*g*i*m - d*g*i*o - d*g*j*l - d*g*j*m - d*g*j*o - d*g*l*m - d*g*l*o - d*i*j*l - d*i*j*m - d*i*j*o - d*j*l*m - d*j*l*o - e*g*i*l - e*g*i*m - e*g*i*o - e*g*j*l - e*g*j*m - e*g*j*o - e*g*l*m - e*g*l*o - e*i*j*l - e*i*j*m - e*i*j*o - e*j*l*m - e*j*l*o),
        h: (-e*j*l*m*q - e*j*l*o*q)/(-d*e*i*l - d*e*i*m - d*e*i*o - d*e*j*l - d*e*j*m - d*e*j*o - d*e*l*m - d*e*l*o - d*g*i*l - d*g*i*m - d*g*i*o - d*g*j*l - d*g*j*m - d*g*j*o - d*g*l*m - d*g*l*o - d*i*j*l - d*i*j*m - d*i*j*o - d*j*l*m - d*j*l*o - e*g*i*l - e*g*i*m - e*g*i*o - e*g*j*l - e*g*j*m - e*g*j*o - e*g*l*m - e*g*l*o - e*i*j*l - e*i*j*m - e*i*j*o - e*j*l*m - e*j*l*o),
        k: e*j*l*o*q/(d*e*i*l + d*e*i*m + d*e*i*o + d*e*j*l + d*e*j*m + d*e*j*o + d*e*l*m + d*e*l*o + d*g*i*l + d*g*i*m + d*g*i*o + d*g*j*l + d*g*j*m + d*g*j*o + d*g*l*m + d*g*l*o + d*i*j*l + d*i*j*m + d*i*j*o + d*j*l*m + d*j*l*o + e*g*i*l + e*g*i*m + e*g*i*o + e*g*j*l + e*g*j*m + e*g*j*o + e*g*l*m + e*g*l*o + e*i*j*l + e*i*j*m + e*i*j*o + e*j*l*m + e*j*l*o),
        n: e*j*l*o*q/(d*e*i*l + d*e*i*m + d*e*i*o + d*e*j*l + d*e*j*m + d*e*j*o + d*e*l*m + d*e*l*o + d*g*i*l + d*g*i*m + d*g*i*o + d*g*j*l + d*g*j*m + d*g*j*o + d*g*l*m + d*g*l*o + d*i*j*l + d*i*j*m + d*i*j*o + d*j*l*m + d*j*l*o + e*g*i*l + e*g*i*m + e*g*i*o + e*g*j*l + e*g*j*m + e*g*j*o + e*g*l*m + e*g*l*o + e*i*j*l + e*i*j*m + e*i*j*o + e*j*l*m + e*j*l*o),
    }
    assert solve_lin_sys(eqs, domain) == sol
def test_solve_lin_sys_6x6_2():
    """Variant of test_solve_lin_sys_6x6_1: the source term is r instead
    of q, and the last equation is n*(1/p + 1/q) - k/p (a coupling through
    q) instead of n/p - k/p. The expected rational-function solution
    entries are correspondingly larger."""
    ground, d,r,e,g,i,j,l,o,m,p,q = field("d,r,e,g,i,j,l,o,m,p,q", ZZ)
    domain, c,f,h,k,n,b = ring("c,f,h,k,n,b", ground)
    # chain of balance equations driven by r; last equation couples n to q
    eqs = [b + r/d - c/d, c*(1/d + 1/e + 1/g) - f/g - r/d, f*(1/g + 1/i + 1/j) - c/g - h/i, h*(1/i + 1/l + 1/m) - f/i - k/m, k*(1/m + 1/o + 1/p) - h/m - n/p, n*(1/p + 1/q) - k/p]
    sol = {
        b: -((l*q*e*o + l*q*g*o + i*m*q*e + i*l*q*e + i*l*p*e + i*j*o*q + j*e*o*q + g*j*o*q + i*e*o*q + g*i*o*q + e*l*o*p + e*l*m*p + e*l*m*o + e*i*o*p + e*i*m*p + e*i*m*o + e*i*l*o + j*e*o*p + j*e*m*q + j*e*m*p + j*e*m*o + j*l*m*q + j*l*m*p + j*l*m*o + i*j*m*p + i*j*m*o + i*j*l*q + i*j*l*o + i*j*m*q + j*l*o*p + j*e*l*o + g*j*o*p + g*j*m*q + g*j*m*p + i*j*l*p + i*j*o*p + j*e*l*q + j*e*l*p + j*l*o*q + g*j*m*o + g*j*l*q + g*j*l*p + g*j*l*o + g*l*o*p + g*l*m*p + g*l*m*o + g*i*m*o + g*i*o*p + g*i*m*q + g*i*m*p + g*i*l*q + g*i*l*p + g*i*l*o + l*m*q*e + l*m*q*g)*r)/(l*q*d*e*o + l*q*d*g*o + l*q*e*g*o + i*j*d*o*q + i*j*e*o*q + j*d*e*o*q + g*j*d*o*q + g*j*e*o*q + g*i*e*o*q + i*d*e*o*q + g*i*d*o*q + g*i*d*o*p + g*i*d*m*q + g*i*d*m*p + g*i*d*m*o + g*i*d*l*q + g*i*d*l*p + g*i*d*l*o + g*e*l*m*p + g*e*l*o*p + g*j*e*l*q + g*e*l*m*o + g*j*e*m*p + g*j*e*m*o + d*e*l*m*p + d*e*l*m*o + i*d*e*m*p + g*j*e*l*p + g*j*e*l*o + d*e*l*o*p + i*j*d*l*o + i*j*e*o*p + i*j*e*m*q + i*j*d*m*q + i*j*d*m*p + i*j*d*m*o + i*j*d*l*q + i*j*d*l*p + i*j*e*m*p + i*j*e*m*o + i*j*e*l*q + i*j*e*l*p + i*j*e*l*o + i*d*e*m*q + i*d*e*m*o + i*d*e*l*q + i*d*e*l*p + j*d*l*o*p + j*d*e*l*o + g*j*d*o*p + g*j*d*m*q + g*j*d*m*p + g*j*d*m*o + g*j*d*l*q + g*j*d*l*p + g*j*d*l*o + g*j*e*o*p + g*j*e*m*q + g*d*l*o*p + g*d*l*m*p + g*d*l*m*o + j*d*e*m*p + i*d*e*o*p + j*e*o*q*l + j*e*o*p*l + j*e*m*q*l + j*d*e*o*p + j*d*e*m*q + i*j*d*o*p + g*i*e*o*p + j*d*e*m*o + j*d*e*l*q + j*d*e*l*p + j*e*m*p*l + j*e*m*o*l + g*i*e*m*q + g*i*e*m*p + g*i*e*m*o + g*i*e*l*q + g*i*e*l*p + g*i*e*l*o + j*d*l*o*q + j*d*l*m*q + j*d*l*m*p + j*d*l*m*o + i*d*e*l*o + l*m*q*d*e + l*m*q*d*g + l*m*q*e*g),
        c: (r*e*(l*q*g*o + i*j*o*q + g*j*o*q + g*i*o*q + j*l*m*q + j*l*m*p + j*l*m*o + i*j*m*p + i*j*m*o + i*j*l*q + i*j*l*o + i*j*m*q + j*l*o*p + g*j*o*p + g*j*m*q + g*j*m*p + i*j*l*p + i*j*o*p + j*l*o*q + g*j*m*o + g*j*l*q + g*j*l*p + g*j*l*o + g*l*o*p + g*l*m*p + g*l*m*o + g*i*m*o + g*i*o*p + g*i*m*q + g*i*m*p + g*i*l*q + g*i*l*p + g*i*l*o + l*m*q*g))/(l*q*d*e*o + l*q*d*g*o + l*q*e*g*o + i*j*d*o*q + i*j*e*o*q + j*d*e*o*q + g*j*d*o*q + g*j*e*o*q + g*i*e*o*q + i*d*e*o*q + g*i*d*o*q + g*i*d*o*p + g*i*d*m*q + g*i*d*m*p + g*i*d*m*o + g*i*d*l*q + g*i*d*l*p + g*i*d*l*o + g*e*l*m*p + g*e*l*o*p + g*j*e*l*q + g*e*l*m*o + g*j*e*m*p + g*j*e*m*o + d*e*l*m*p + d*e*l*m*o + i*d*e*m*p + g*j*e*l*p + g*j*e*l*o + d*e*l*o*p + i*j*d*l*o + i*j*e*o*p + i*j*e*m*q + i*j*d*m*q + i*j*d*m*p + i*j*d*m*o + i*j*d*l*q + i*j*d*l*p + i*j*e*m*p + i*j*e*m*o + i*j*e*l*q + i*j*e*l*p + i*j*e*l*o + i*d*e*m*q + i*d*e*m*o + i*d*e*l*q + i*d*e*l*p + j*d*l*o*p + j*d*e*l*o + g*j*d*o*p + g*j*d*m*q + g*j*d*m*p + g*j*d*m*o + g*j*d*l*q + g*j*d*l*p + g*j*d*l*o + g*j*e*o*p + g*j*e*m*q + g*d*l*o*p + g*d*l*m*p + g*d*l*m*o + j*d*e*m*p + i*d*e*o*p + j*e*o*q*l + j*e*o*p*l + j*e*m*q*l + j*d*e*o*p + j*d*e*m*q + i*j*d*o*p + g*i*e*o*p + j*d*e*m*o + j*d*e*l*q + j*d*e*l*p + j*e*m*p*l + j*e*m*o*l + g*i*e*m*q + g*i*e*m*p + g*i*e*m*o + g*i*e*l*q + g*i*e*l*p + g*i*e*l*o + j*d*l*o*q + j*d*l*m*q + j*d*l*m*p + j*d*l*m*o + i*d*e*l*o + l*m*q*d*e + l*m*q*d*g + l*m*q*e*g),
        f: (r*e*j*(l*q*o + l*o*p + l*m*q + l*m*p + l*m*o + i*o*q + i*o*p + i*m*q + i*m*p + i*m*o + i*l*q + i*l*p + i*l*o))/(l*q*d*e*o + l*q*d*g*o + l*q*e*g*o + i*j*d*o*q + i*j*e*o*q + j*d*e*o*q + g*j*d*o*q + g*j*e*o*q + g*i*e*o*q + i*d*e*o*q + g*i*d*o*q + g*i*d*o*p + g*i*d*m*q + g*i*d*m*p + g*i*d*m*o + g*i*d*l*q + g*i*d*l*p + g*i*d*l*o + g*e*l*m*p + g*e*l*o*p + g*j*e*l*q + g*e*l*m*o + g*j*e*m*p + g*j*e*m*o + d*e*l*m*p + d*e*l*m*o + i*d*e*m*p + g*j*e*l*p + g*j*e*l*o + d*e*l*o*p + i*j*d*l*o + i*j*e*o*p + i*j*e*m*q + i*j*d*m*q + i*j*d*m*p + i*j*d*m*o + i*j*d*l*q + i*j*d*l*p + i*j*e*m*p + i*j*e*m*o + i*j*e*l*q + i*j*e*l*p + i*j*e*l*o + i*d*e*m*q + i*d*e*m*o + i*d*e*l*q + i*d*e*l*p + j*d*l*o*p + j*d*e*l*o + g*j*d*o*p + g*j*d*m*q + g*j*d*m*p + g*j*d*m*o + g*j*d*l*q + g*j*d*l*p + g*j*d*l*o + g*j*e*o*p + g*j*e*m*q + g*d*l*o*p + g*d*l*m*p + g*d*l*m*o + j*d*e*m*p + i*d*e*o*p + j*e*o*q*l + j*e*o*p*l + j*e*m*q*l + j*d*e*o*p + j*d*e*m*q + i*j*d*o*p + g*i*e*o*p + j*d*e*m*o + j*d*e*l*q + j*d*e*l*p + j*e*m*p*l + j*e*m*o*l + g*i*e*m*q + g*i*e*m*p + g*i*e*m*o + g*i*e*l*q + g*i*e*l*p + g*i*e*l*o + j*d*l*o*q + j*d*l*m*q + j*d*l*m*p + j*d*l*m*o + i*d*e*l*o + l*m*q*d*e + l*m*q*d*g + l*m*q*e*g),
        h: (j*e*r*l*(o*q + o*p + m*q + m*p + m*o))/(l*q*d*e*o + l*q*d*g*o + l*q*e*g*o + i*j*d*o*q + i*j*e*o*q + j*d*e*o*q + g*j*d*o*q + g*j*e*o*q + g*i*e*o*q + i*d*e*o*q + g*i*d*o*q + g*i*d*o*p + g*i*d*m*q + g*i*d*m*p + g*i*d*m*o + g*i*d*l*q + g*i*d*l*p + g*i*d*l*o + g*e*l*m*p + g*e*l*o*p + g*j*e*l*q + g*e*l*m*o + g*j*e*m*p + g*j*e*m*o + d*e*l*m*p + d*e*l*m*o + i*d*e*m*p + g*j*e*l*p + g*j*e*l*o + d*e*l*o*p + i*j*d*l*o + i*j*e*o*p + i*j*e*m*q + i*j*d*m*q + i*j*d*m*p + i*j*d*m*o + i*j*d*l*q + i*j*d*l*p + i*j*e*m*p + i*j*e*m*o + i*j*e*l*q + i*j*e*l*p + i*j*e*l*o + i*d*e*m*q + i*d*e*m*o + i*d*e*l*q + i*d*e*l*p + j*d*l*o*p + j*d*e*l*o + g*j*d*o*p + g*j*d*m*q + g*j*d*m*p + g*j*d*m*o + g*j*d*l*q + g*j*d*l*p + g*j*d*l*o + g*j*e*o*p + g*j*e*m*q + g*d*l*o*p + g*d*l*m*p + g*d*l*m*o + j*d*e*m*p + i*d*e*o*p + j*e*o*q*l + j*e*o*p*l + j*e*m*q*l + j*d*e*o*p + j*d*e*m*q + i*j*d*o*p + g*i*e*o*p + j*d*e*m*o + j*d*e*l*q + j*d*e*l*p + j*e*m*p*l + j*e*m*o*l + g*i*e*m*q + g*i*e*m*p + g*i*e*m*o + g*i*e*l*q + g*i*e*l*p + g*i*e*l*o + j*d*l*o*q + j*d*l*m*q + j*d*l*m*p + j*d*l*m*o + i*d*e*l*o + l*m*q*d*e + l*m*q*d*g + l*m*q*e*g),
        k: (j*e*r*o*l*(q + p))/(l*q*d*e*o + l*q*d*g*o + l*q*e*g*o + i*j*d*o*q + i*j*e*o*q + j*d*e*o*q + g*j*d*o*q + g*j*e*o*q + g*i*e*o*q + i*d*e*o*q + g*i*d*o*q + g*i*d*o*p + g*i*d*m*q + g*i*d*m*p + g*i*d*m*o + g*i*d*l*q + g*i*d*l*p + g*i*d*l*o + g*e*l*m*p + g*e*l*o*p + g*j*e*l*q + g*e*l*m*o + g*j*e*m*p + g*j*e*m*o + d*e*l*m*p + d*e*l*m*o + i*d*e*m*p + g*j*e*l*p + g*j*e*l*o + d*e*l*o*p + i*j*d*l*o + i*j*e*o*p + i*j*e*m*q + i*j*d*m*q + i*j*d*m*p + i*j*d*m*o + i*j*d*l*q + i*j*d*l*p + i*j*e*m*p + i*j*e*m*o + i*j*e*l*q + i*j*e*l*p + i*j*e*l*o + i*d*e*m*q + i*d*e*m*o + i*d*e*l*q + i*d*e*l*p + j*d*l*o*p + j*d*e*l*o + g*j*d*o*p + g*j*d*m*q + g*j*d*m*p + g*j*d*m*o + g*j*d*l*q + g*j*d*l*p + g*j*d*l*o + g*j*e*o*p + g*j*e*m*q + g*d*l*o*p + g*d*l*m*p + g*d*l*m*o + j*d*e*m*p + i*d*e*o*p + j*e*o*q*l + j*e*o*p*l + j*e*m*q*l + j*d*e*o*p + j*d*e*m*q + i*j*d*o*p + g*i*e*o*p + j*d*e*m*o + j*d*e*l*q + j*d*e*l*p + j*e*m*p*l + j*e*m*o*l + g*i*e*m*q + g*i*e*m*p + g*i*e*m*o + g*i*e*l*q + g*i*e*l*p + g*i*e*l*o + j*d*l*o*q + j*d*l*m*q + j*d*l*m*p + j*d*l*m*o + i*d*e*l*o + l*m*q*d*e + l*m*q*d*g + l*m*q*e*g),
        n: (j*e*r*o*q*l)/(l*q*d*e*o + l*q*d*g*o + l*q*e*g*o + i*j*d*o*q + i*j*e*o*q + j*d*e*o*q + g*j*d*o*q + g*j*e*o*q + g*i*e*o*q + i*d*e*o*q + g*i*d*o*q + g*i*d*o*p + g*i*d*m*q + g*i*d*m*p + g*i*d*m*o + g*i*d*l*q + g*i*d*l*p + g*i*d*l*o + g*e*l*m*p + g*e*l*o*p + g*j*e*l*q + g*e*l*m*o + g*j*e*m*p + g*j*e*m*o + d*e*l*m*p + d*e*l*m*o + i*d*e*m*p + g*j*e*l*p + g*j*e*l*o + d*e*l*o*p + i*j*d*l*o + i*j*e*o*p + i*j*e*m*q + i*j*d*m*q + i*j*d*m*p + i*j*d*m*o + i*j*d*l*q + i*j*d*l*p + i*j*e*m*p + i*j*e*m*o + i*j*e*l*q + i*j*e*l*p + i*j*e*l*o + i*d*e*m*q + i*d*e*m*o + i*d*e*l*q + i*d*e*l*p + j*d*l*o*p + j*d*e*l*o + g*j*d*o*p + g*j*d*m*q + g*j*d*m*p + g*j*d*m*o + g*j*d*l*q + g*j*d*l*p + g*j*d*l*o + g*j*e*o*p + g*j*e*m*q + g*d*l*o*p + g*d*l*m*p + g*d*l*m*o + j*d*e*m*p + i*d*e*o*p + j*e*o*q*l + j*e*o*p*l + j*e*m*q*l + j*d*e*o*p + j*d*e*m*q + i*j*d*o*p + g*i*e*o*p + j*d*e*m*o + j*d*e*l*q + j*d*e*l*p + j*e*m*p*l + j*e*m*o*l + g*i*e*m*q + g*i*e*m*p + g*i*e*m*o + g*i*e*l*q + g*i*e*l*p + g*i*e*l*o + j*d*l*o*q + j*d*l*m*q + j*d*l*m*p + j*d*l*m*o + i*d*e*l*o + l*m*q*d*e + l*m*q*d*g + l*m*q*e*g),
    }
    assert solve_lin_sys(eqs, domain) == sol
| bsd-3-clause |
Freso/picard | test/test_util_filenaming.py | 2 | 5602 | # -*- coding: utf-8 -*-
import os
import os.path
import unittest
import sys
from picard.util.filenaming import make_short_filename
class ShortFilenameTest(unittest.TestCase):
    """Tests for picard.util.filenaming.make_short_filename, covering
    platform-specific name-length limits, surrogate-pair (non-BMP)
    characters, and Windows-compatibility shortening."""

    def __init__(self, *args, **kwargs):
        self.maxDiff = None
        # Root with a 10-char component; drive letter prefix on Windows.
        self.root = os.path.join(sys.platform == "win32" and "X:\\" or "/", "x" * 10)
        # BUG FIX: original read ``sys.platform in ("win32")`` — ("win32")
        # is a plain string, not a tuple, so this was a substring test.
        # It happened to work for real platform values, but the intended
        # check is a direct equality.
        if sys.platform == "win32":
            self.max_len = 255
        else:
            # maximum filename length of the filesystem hosting "/"
            self.max_len = os.statvfs("/").f_namemax
        super(ShortFilenameTest, self).__init__(*args, **kwargs)

    @unittest.skipUnless(sys.platform in ("win32", "darwin"), "windows / os x test")
    def test_bmp_unicode_on_unicode_fs(self):
        # BMP characters fit in one UTF-16 unit; 120 chars need no shortening
        char = u"\N{LATIN SMALL LETTER SHARP S}"
        fn = make_short_filename(self.root, os.path.join(*[char * 120] * 2))
        self.assertEqual(fn, os.path.join(*[char * 120] * 2))

    @unittest.skipUnless(sys.platform not in ("win32", "darwin"), "non-windows, non-osx test")
    def test_bmp_unicode_on_nix(self):
        # on byte-oriented filesystems the limit is in encoded bytes
        char = u"\N{LATIN SMALL LETTER SHARP S}"
        max_len = self.max_len
        divisor = len(char.encode(sys.getfilesystemencoding()))
        fn = make_short_filename(self.root, os.path.join(*[char * 200] * 2))
        self.assertEqual(fn, os.path.join(*[char * (max_len // divisor)] * 2))

    @unittest.skipUnless(sys.platform == "darwin", "os x test")
    def test_precomposed_unicode_on_osx(self):
        # HFS+ decomposes; each precomposed char counts as 2 units here
        char = u"\N{LATIN SMALL LETTER A WITH BREVE}"
        max_len = self.max_len
        fn = make_short_filename(self.root, os.path.join(*[char * 200] * 2))
        self.assertEqual(fn, os.path.join(*[char * (max_len // 2)] * 2))

    @unittest.skipUnless(sys.platform == "win32", "windows test")
    def test_nonbmp_unicode_on_windows(self):
        # non-BMP chars are surrogate pairs (2 UTF-16 units each);
        # total path budget is 259 minus drive, root and separators
        char = u"\N{MUSICAL SYMBOL G CLEF}"
        remaining = 259 - (3 + 10 + 1 + 200 + 1)
        fn = make_short_filename(self.root, os.path.join(*[char * 100] * 2))
        self.assertEqual(fn, os.path.join(char * 100, char * (remaining // 2)))

    @unittest.skipUnless(sys.platform == "darwin", "os x test")
    def test_nonbmp_unicode_on_osx(self):
        char = u"\N{MUSICAL SYMBOL G CLEF}"
        max_len = self.max_len
        fn = make_short_filename(self.root, os.path.join(*[char * 200] * 2))
        self.assertEqual(fn, os.path.join(*[char * (max_len // 2)] * 2))

    @unittest.skipUnless(sys.platform not in ("win32", "darwin"), "non-windows, non-osx test")
    def test_nonbmp_unicode_on_nix(self):
        char = u"\N{MUSICAL SYMBOL G CLEF}"
        max_len = self.max_len
        divisor = len(char.encode(sys.getfilesystemencoding()))
        fn = make_short_filename(self.root, os.path.join(*[char * 100] * 2))
        self.assertEqual(fn, os.path.join(*[char * (max_len // divisor)] * 2))

    @unittest.skipUnless(sys.platform not in ("win32", "darwin"), "non-windows, non-osx test")
    def test_nonbmp_unicode_on_nix_with_windows_compat(self):
        # first component limited by the local fs, second by the Windows budget
        char = u"\N{MUSICAL SYMBOL G CLEF}"
        max_len = self.max_len
        remaining = 259 - (3 + 10 + 1 + 200 + 1)
        divisor = len(char.encode(sys.getfilesystemencoding()))
        fn = make_short_filename(self.root, os.path.join(*[char * 100] * 2), win_compat=True)
        self.assertEqual(fn, os.path.join(char * (max_len // divisor), char * (remaining // 2)))

    def test_windows_shortening(self):
        # components are shortened evenly, keeping the file extension
        fn = make_short_filename(self.root, os.path.join("a" * 200, "b" * 200, "c" * 200 + ".ext"), win_compat=True)
        self.assertEqual(fn, os.path.join("a" * 116, "b" * 116, "c" * 7 + ".ext"))

    @unittest.skipUnless(sys.platform != "win32", "non-windows test")
    def test_windows_shortening_with_ancestor_on_nix(self):
        # length budget is computed relative to relative_to, not the full root
        root = os.path.join(self.root, "w" * 10, "x" * 10, "y" * 9, "z" * 9)
        fn = make_short_filename(
            root, os.path.join("b" * 200, "c" * 200, "d" * 200 + ".ext"),
            win_compat=True, relative_to=self.root)
        self.assertEqual(fn, os.path.join("b" * 100, "c" * 100, "d" * 7 + ".ext"))

    def test_windows_node_maxlength_shortening(self):
        # a single component is capped at 226 chars; the rest gets the leftover
        max_len = 226
        remaining = 259 - (3 + 10 + 1 + max_len + 1)
        fn = make_short_filename(self.root, os.path.join("a" * 300, "b" * 100 + ".ext"), win_compat=True)
        self.assertEqual(fn, os.path.join("a" * max_len, "b" * (remaining - 4) + ".ext"))

    def test_windows_selective_shortening(self):
        # only the components that exceed the per-node budget are trimmed
        root = self.root + "x" * (44 - 10 - 3)
        fn = make_short_filename(root, os.path.join(
            os.path.join(*["a" * 9] * 10 + ["b" * 15] * 10), "c" * 10), win_compat=True)
        self.assertEqual(fn, os.path.join(os.path.join(*["a" * 9] * 10 + ["b" * 9] * 10), "c" * 10))

    def test_windows_shortening_not_needed(self):
        # path already fits: returned unchanged
        root = self.root + "x" * 33
        fn = make_short_filename(root, os.path.join(
            os.path.join(*["a" * 9] * 20), "b" * 10), win_compat=True)
        self.assertEqual(fn, os.path.join(os.path.join(*["a" * 9] * 20), "b" * 10))

    def test_windows_path_too_long(self):
        # root alone nearly exhausts the budget: shortening must fail
        root = self.root + "x" * 230
        self.assertRaises(IOError, make_short_filename,
                          root, os.path.join("a", "b", "c", "d"), win_compat=True)

    def test_windows_path_not_too_long(self):
        root = self.root + "x" * 230
        fn = make_short_filename(root, os.path.join("a", "b", "c"), win_compat=True)
        self.assertEqual(fn, os.path.join("a", "b", "c"))

    def test_whitespace(self):
        # leading/trailing whitespace in components is stripped
        fn = make_short_filename(self.root, os.path.join("a1234567890 ", " b1234567890 "))
        self.assertEqual(fn, os.path.join("a1234567890", "b1234567890"))
| gpl-2.0 |
RafaelTorrealba/odoo | addons/project_issue/project_issue.py | 217 | 29319 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class project_issue_version(osv.Model):
    # Master-data model holding version numbers that issues can reference;
    # newest versions sort first.
    _name = "project.issue.version"
    _order = "name desc"
    _columns = {
        'name': fields.char('Version Number', required=True),
        # 'active' lets versions be archived (hidden) instead of deleted
        'active': fields.boolean('Active', required=False),
    }
    _defaults = {
        'active': 1,  # new versions are visible by default
    }
class project_issue(osv.Model):
    # Issue/bug tracking model; chatter-enabled via mail.thread and shown
    # in the needaction (to-do) counters via ir.needaction_mixin.
    _name = "project.issue"
    _description = "Project Issue"
    _order = "priority desc, create_date desc"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _mail_post_access = 'read'  # read access is enough to post messages

    # Maps field changes to mail.message subtypes so followers get the
    # right notification; each value is a predicate on the record.
    _track = {
        'stage_id': {
            # this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
            'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
            'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
        },
        'user_id': {
            'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
        },
        'kanban_state': {
            'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
            'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
        },
    }
def _get_default_partner(self, cr, uid, context=None):
    """Default contact for a new issue: the partner of the default
    project (taken from the context), or False if none."""
    default_project = self._get_default_project_id(cr, uid, context)
    if not default_project:
        return False
    project = self.pool.get('project.project').browse(cr, uid, default_project, context=context)
    if project and project.partner_id:
        return project.partner_id.id
    return False
def _get_default_project_id(self, cr, uid, context=None):
    """Default project for a new issue: resolved from the
    'default_project_id' context key (see
    _resolve_project_id_from_context), or None."""
    return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
    """Default stage for a new issue: the first unfolded stage matching
    the default project (if any)."""
    default_project = self._get_default_project_id(cr, uid, context=context)
    unfolded = [('fold', '=', False)]
    return self.stage_find(cr, uid, [], default_project, unfolded, context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
    """ Returns ID of project based on the value of 'default_project_id'
        context key, or None if it cannot be resolved to a single
        project.
    """
    if context is None:
        context = {}
    default_project = context.get('default_project_id')
    # isinstance() instead of type() comparison (PEP 8); bool is excluded
    # explicitly since it subclasses int but is never a valid record id.
    if isinstance(default_project, (int, long)) and not isinstance(default_project, bool):
        return default_project
    if isinstance(default_project, basestring):
        # resolve a project *name* coming from the web client; only accept
        # an unambiguous match
        project_ids = self.pool.get('project.project').name_search(cr, uid, name=default_project, context=context)
        if len(project_ids) == 1:
            return int(project_ids[0][0])
    return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
    """Return the stages to display as kanban/group-by columns, plus a
    mapping stage id -> folded flag. Includes stages already present in
    ``ids`` and, when a default project is in the context, that project's
    stages as well.

    :return: (list of (id, name) pairs in stage order, {id: fold}) tuple
    """
    access_rights_uid = access_rights_uid or uid
    stage_obj = self.pool.get('project.task.type')
    order = stage_obj._order
    # lame hack to allow reverting search, should just work in the trivial case
    if read_group_order == 'stage_id desc':
        order = "%s desc" % order
    # retrieve section_id from the context and write the domain
    # - ('id', 'in', 'ids'): add columns that should be present
    # - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
    # - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
    search_domain = []
    project_id = self._resolve_project_id_from_context(cr, uid, context=context)
    if project_id:
        search_domain += ['|', ('project_ids', '=', project_id)]
    search_domain += [('id', 'in', ids)]
    # perform search
    stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
    result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
    # restore order of the search
    result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
    # second return value: which columns start folded in the kanban view
    fold = {}
    for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
        fold[stage.id] = stage.fold or False
    return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
    """Function-field computer for the issue timing statistics.

    For each issue, computes (all relative to the creation date):
    - day_open / working_hours_open: calendar days / working hours until
      the issue was assigned (date_open)
    - day_close / working_hours_close: same, until the issue was closed
    - days_since_creation: calendar days elapsed since creation
    - inactivity_days: calendar days since the last recorded activity

    :param fields: names of the function fields actually requested; only
        those keys are returned per id
    :return: {issue_id: {field_name: value}}
    """
    Calendar = self.pool['resource.calendar']
    res = dict((res_id, {}) for res_id in ids)
    for issue in self.browse(cr, uid, ids, context=context):
        values = {
            'day_open': 0.0, 'day_close': 0.0,
            'working_hours_open': 0.0, 'working_hours_close': 0.0,
            'days_since_creation': 0.0, 'inactivity_days': 0.0,
        }
        # if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
        calendar_id = None
        if issue.project_id and issue.project_id.resource_calendar_id:
            calendar_id = issue.project_id.resource_calendar_id.id
        dt_create_date = datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
        if issue.date_open:
            dt_date_open = datetime.strptime(issue.date_open, DEFAULT_SERVER_DATETIME_FORMAT)
            # elapsed time expressed in fractional days
            values['day_open'] = (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600)
            values['working_hours_open'] = Calendar._interval_hours_get(
                cr, uid, calendar_id, dt_create_date, dt_date_open,
                timezone_from_uid=issue.user_id.id or uid,
                exclude_leaves=False, context=context)
        if issue.date_closed:
            dt_date_closed = datetime.strptime(issue.date_closed, DEFAULT_SERVER_DATETIME_FORMAT)
            values['day_close'] = (dt_date_closed - dt_create_date).total_seconds() / (24.0 * 3600)
            values['working_hours_close'] = Calendar._interval_hours_get(
                cr, uid, calendar_id, dt_create_date, dt_date_closed,
                timezone_from_uid=issue.user_id.id or uid,
                exclude_leaves=False, context=context)
        days_since_creation = datetime.today() - dt_create_date
        values['days_since_creation'] = days_since_creation.days
        # last activity: prefer date_action_last, then the last stage
        # change, finally fall back to the creation date
        if issue.date_action_last:
            inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, DEFAULT_SERVER_DATETIME_FORMAT)
        elif issue.date_last_stage_update:
            inactive_days = datetime.today() - datetime.strptime(issue.date_last_stage_update, DEFAULT_SERVER_DATETIME_FORMAT)
        else:
            inactive_days = datetime.today() - datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
        values['inactivity_days'] = inactive_days.days
        # filter only required values
        for field in fields:
            res[issue.id][field] = values[field]
    return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
    """Function-field computer mirroring the progress of the linked task
    onto the issue; issues without a task report 0.0."""
    task_obj = self.pool.get('project.task')
    result = {}
    for issue in self.browse(cr, uid, ids, context=context):
        if issue.task_id:
            task_values = task_obj._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)
            progress = task_values[issue.task_id.id]['progress']
        else:
            progress = 0.0
        result[issue.id] = {'progress': progress}
    return result
def on_change_project(self, cr, uid, ids, project_id, context=None):
    """Onchange handler: when the project changes, propose its partner
    as the issue contact."""
    if not project_id:
        return {}
    project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
    if project and project.partner_id:
        return {'value': {'partner_id': project.partner_id.id}}
    return {}
def _get_issue_task(self, cr, uid, ids, context=None):
    """Store trigger: given project.task ids, return the ids of the
    issues linked to those tasks."""
    issue_obj = self.pool.get('project.issue')
    related_issue_ids = []
    for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
        related_issue_ids += issue_obj.search(cr, uid, [('task_id', '=', task.id)])
    return related_issue_ids
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel': fields.char('Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to assign the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'working_hours_close': fields.function(_compute_day, string='Working Hours to close the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'inactivity_days': fields.function(_compute_day, string='Days since last action',
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
'active': 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '0',
'kanban_state': 'normal',
'date_last_stage_update': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, [id], ['name'], context=context)[0]
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default, context=context)
    def create(self, cr, uid, vals, context=None):
        """Create an issue.

        Propagates the issue's project into the context as
        default_project_id so downstream defaults pick the right project,
        stamps date_open when the issue is created already assigned, and
        applies the stage onchange so stage-driven values (date_closed)
        are consistent from the start.
        """
        context = dict(context or {})
        if vals.get('project_id') and not context.get('default_project_id'):
            context['default_project_id'] = vals.get('project_id')
        # created already assigned: record the assignment date now
        if vals.get('user_id'):
            vals['date_open'] = fields.datetime.now()
        if 'stage_id' in vals:
            vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
        # context: no_log, because subtype already handle this
        create_context = dict(context, mail_create_nolog=True)
        return super(project_issue, self).create(cr, uid, vals, context=create_context)
    def write(self, cr, uid, ids, vals, context=None):
        """Update issues, maintaining the tracking fields.

        On a stage change: apply the stage onchange (sets/clears
        date_closed), stamp date_last_stage_update, and reset the kanban
        state unless the caller set one explicitly.  On an assignment
        change: stamp date_open.
        """
        # stage change: update date_last_stage_update
        if 'stage_id' in vals:
            vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
            vals['date_last_stage_update'] = fields.datetime.now()
            if 'kanban_state' not in vals:
                vals['kanban_state'] = 'normal'
        # user_id change: update date_start
        if vals.get('user_id'):
            vals['date_open'] = fields.datetime.now()
        return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
""" This function returns value of partner email address based on partner
:param part: Partner's id
"""
result = {}
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context)
result['email_from'] = partner.email
return {'value': result}
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_document_name'] = _("issues")
return super(project_issue, self).get_empty_list_help(cr, uid, help, context=context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool['project.task.type'].browse(cr, uid, stage_id, context=context)
if stage.fold:
return {'value': {'date_closed': fields.datetime.now()}}
return {'value': {'date_closed': False}}
    def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
        """ Override of the base.stage method
            Parameter of the stage search taken from the issue:
            - type: stage type must be the same or 'both'
            - section_id: if set, stages must belong to this section or
              be a default case

            Collects the project ids of every given issue plus the
            optional ``section_id``, ORs them into one search domain and
            returns the first matching stage id (ordered by ``order``),
            or False when none matches.
        """
        if isinstance(cases, (int, long)):
            # `long` is the Python 2 integer type; a bare id is promoted
            # to a browse record list here.
            cases = self.browse(cr, uid, cases, context=context)
        # collect all section_ids
        section_ids = []
        if section_id:
            section_ids.append(section_id)
        for task in cases:
            if task.project_id:
                section_ids.append(task.project_id.id)
        # OR all section_ids and OR with case_default
        search_domain = []
        if section_ids:
            # n leaves need n-1 '|' operators in polish-notation domains
            search_domain += [('|')] * (len(section_ids)-1)
        for section_id in section_ids:
            search_domain.append(('project_ids', '=', section_id))
        search_domain += list(domain)
        # perform search, return the first found
        stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
        if stage_ids:
            return stage_ids[0]
        return False
def case_escalate(self, cr, uid, ids, context=None): # FIXME rename this method to issue_escalate
for issue in self.browse(cr, uid, ids, context=context):
data = {}
esc_proj = issue.project_id.project_escalation_id
if not esc_proj:
raise osv.except_osv(_('Warning!'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
data['project_id'] = esc_proj.id
if esc_proj.user_id:
data['user_id'] = esc_proj.user_id.id
issue.write(data)
if issue.task_id:
issue.task_id.write({'project_id': esc_proj.id, 'user_id': False})
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
issues = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([issue.project_id.id for issue in issues if issue.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((issue.id, aliases.get(issue.project_id and issue.project_id.id or 0, False)) for issue in issues)
    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        """Suggest the issue's customer (or raw email) as follower recipient."""
        recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
        try:
            for issue in self.browse(cr, uid, ids, context=context):
                if issue.partner_id:
                    self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
                elif issue.email_from:
                    self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
        except (osv.except_osv, orm.except_orm): # no read access rights -> just ignore suggested recipients because this imply modifying followers
            pass
        return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
context = dict(context or {}, state_to='draft')
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
defaults.update(custom_values)
res_id = super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
return res_id
    @api.cr_uid_ids_context
    def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
        """ Overrides mail_thread message_post so that we can set the date of last action field when
            a new message is posted on the issue.
        """
        if context is None:
            context = {}
        res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
        # Only posts carrying a subtype count as a real action; write as
        # SUPERUSER_ID so the stamp is not blocked by record rules.
        if thread_id and subtype:
            self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
        return res
class project(osv.Model):
    # Extend project.project with issue tracking: mail-alias target models,
    # an open-issue counter and the escalation-project configuration.
    _inherit = "project.project"
    def _get_alias_models(self, cr, uid, context=None):
        # Models a project's incoming-mail alias may create records for.
        return [('project.task', "Tasks"), ("project.issue", "Issues")]
    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        # Number of open (non-folded-stage) issues per project.
        Issue = self.pool['project.issue']
        return {
            project_id: Issue.search_count(cr,uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
            for project_id in ids
        }
    _columns = {
        'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
            help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
            states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
        'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
        'issue_ids': fields.one2many('project.issue', 'project_id',
                                     domain=[('stage_id.fold', '=', False)])
    }
    def _check_escalation(self, cr, uid, ids, context=None):
        # Constraint: a project must not escalate to itself.
        project_obj = self.browse(cr, uid, ids[0], context=context)
        if project_obj.project_escalation_id:
            if project_obj.project_escalation_id.id == project_obj.id:
                return False
        return True
    _constraints = [
        (_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
    ]
class account_analytic_account(osv.Model):
    # Adds the use_issues flag so analytic accounts / contract templates
    # can drive whether their generated project tracks issues.
    _inherit = 'account.analytic.account'
    _description = 'Analytic Account'
    _columns = {
        'use_issues': fields.boolean('Issues', help="Check this field if this project manages issues"),
    }
    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        # Copy the use_issues flag from the selected contract template.
        res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
        if template_id and 'value' in res:
            template = self.browse(cr, uid, template_id, context=context)
            res['value']['use_issues'] = template.use_issues
        return res
    def _trigger_project_creation(self, cr, uid, vals, context=None):
        # Also trigger project creation when issue tracking is enabled,
        # unless a project creation is already underway in this context.
        if context is None:
            context = {}
        res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
        return res or (vals.get('use_issues') and not 'project_creation_in_progress' in context)
class project_project(osv.Model):
    """Enable issues on projects by default and keep the project's mail
    alias model in sync with the use_tasks / use_issues flags."""
    _inherit = 'project.project'
    _defaults = {
        'use_issues': True
    }
    def _check_create_write_values(self, cr, uid, vals, context=None):
        """Mutate ``vals`` in place: when exactly one of use_tasks /
        use_issues is enabled, point the mail alias at that model."""
        use_tasks = vals.get('use_tasks')
        use_issues = vals.get('use_issues')
        if use_tasks and not use_issues:
            vals['alias_model'] = 'project.task'
        elif use_issues and not use_tasks:
            vals['alias_model'] = 'project.issue'
    def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
        """Onchange mirror of _check_create_write_values for the form view."""
        values = {}
        if use_tasks and not use_issues:
            values['alias_model'] = 'project.task'
        elif use_issues and not use_tasks:
            values['alias_model'] = 'project.issue'
        return {'value': values}
    def create(self, cr, uid, vals, context=None):
        self._check_create_write_values(cr, uid, vals, context=context)
        return super(project_project, self).create(cr, uid, vals, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        self._check_create_write_values(cr, uid, vals, context=context)
        return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
    """Inherits partner and adds Issue information in the partner form."""
    # NOTE: the docstring above was previously a dead string expression in
    # the middle of the class body (not the first statement), so it never
    # became __doc__; it is now the real class docstring.
    _inherit = 'res.partner'
    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        # Number of issues attached to each partner; context is threaded
        # through for consistency with project._issue_count.
        Issue = self.pool['project.issue']
        return {
            partner_id: Issue.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context)
            for partner_id in ids
        }
    _columns = {
        'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
    }
| agpl-3.0 |
atomic83/youtube-dl | youtube_dl/extractor/karrierevideos.py | 115 | 3200 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
fix_xml_ampersands,
float_or_none,
xpath_with_ns,
xpath_text,
)
class KarriereVideosIE(InfoExtractor):
    """Extractor for karrierevideos.at career-orientation videos (RTMP)."""
    _VALID_URL = r'http://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin',
        'info_dict': {
            'id': '32c91',
            'ext': 'flv',
            'title': 'AltenpflegerIn',
            'description': 'md5:dbadd1259fde2159a9b28667cb664ae2',
            'thumbnail': 're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # broken ampersands
        'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun',
        'info_dict': {
            'id': '5sniu',
            'ext': 'flv',
            'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"',
            'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33',
            'thumbnail': 're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # BUG FIX: the _search_regex fallback was called without the
        # mandatory `string` and `name` arguments, raising TypeError
        # whenever the page had no <meta name="title"> tag.
        title = (self._html_search_meta('title', webpage, default=None) or
                 self._search_regex(
                     r'<h1 class="title">([^<]+)</h1>', webpage, 'title'))
        # the real playlist id lives in the player config URL
        video_id = self._search_regex(
            r'/config/video/(.+?)\.xml', webpage, 'video id')
        playlist = self._download_xml(
            'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id,
            video_id, transform_source=fix_xml_ampersands)
        NS_MAP = {
            'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'
        }
        def ns(path):
            return xpath_with_ns(path, NS_MAP)
        item = playlist.find('./tracklist/item')
        video_file = xpath_text(
            item, ns('./jwplayer:file'), 'video url', fatal=True)
        streamer = xpath_text(
            item, ns('./jwplayer:streamer'), 'streamer', fatal=True)
        uploader = xpath_text(
            item, ns('./jwplayer:author'), 'uploader')
        duration = float_or_none(
            xpath_text(item, ns('./jwplayer:duration'), 'duration'))
        description = self._html_search_regex(
            r'(?s)<div class="leadtext">(.+?)</div>',
            webpage, 'description')
        thumbnail = self._html_search_meta(
            'thumbnail', webpage, 'thumbnail')
        if thumbnail:
            thumbnail = compat_urlparse.urljoin(url, thumbnail)
        return {
            'id': video_id,
            'url': streamer.replace('rtmpt', 'rtmp'),
            'play_path': 'mp4:%s' % video_file,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
        }
| unlicense |
rangadi/incubator-beam | sdks/python/apache_beam/io/filesystems_test.py | 6 | 8510 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
from __future__ import absolute_import
import filecmp
import logging
import os
import shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator),) + paths)
return _join
class FileSystemsTest(unittest.TestCase):
  """Tests for the FileSystems facade backed by the local filesystem.

  BUG FIX: ``test_match_directory`` was defined twice; the second
  definition silently shadowed the first, so the glob-matching test
  never ran.  The first is now ``test_match_glob``.
  """

  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmpdir)

  def test_get_scheme(self):
    self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
    self.assertIsNone(FileSystems.get_scheme('c:\\abc\cdf'))  # pylint: disable=anomalous-backslash-in-string
    self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')

  def test_get_filesystem(self):
    self.assertTrue(isinstance(FileSystems.get_filesystem('/tmp'),
                               localfilesystem.LocalFileSystem))
    self.assertTrue(isinstance(FileSystems.get_filesystem('c:\\abc\def'),  # pylint: disable=anomalous-backslash-in-string
                               localfilesystem.LocalFileSystem))
    with self.assertRaises(ValueError):
      FileSystems.get_filesystem('error://abc/def')

  @mock.patch('apache_beam.io.localfilesystem.os')
  def test_unix_path_join(self, *unused_mocks):
    # Test joining of Unix paths.
    localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
    self.assertEqual('/tmp/path/to/file',
                     FileSystems.join('/tmp/path', 'to', 'file'))
    self.assertEqual('/tmp/path/to/file',
                     FileSystems.join('/tmp/path', 'to/file'))
    self.assertEqual('/tmp/path/to/file',
                     FileSystems.join('/', 'tmp/path', 'to/file'))
    self.assertEqual('/tmp/path/to/file',
                     FileSystems.join('/tmp/', 'path', 'to/file'))

  @mock.patch('apache_beam.io.localfilesystem.os')
  def test_windows_path_join(self, *unused_mocks):
    # Test joining of Windows paths.
    localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
    self.assertEqual(r'C:\tmp\path\to\file',
                     FileSystems.join(r'C:\tmp\path', 'to', 'file'))
    self.assertEqual(r'C:\tmp\path\to\file',
                     FileSystems.join(r'C:\tmp\path', r'to\file'))
    self.assertEqual(r'C:\tmp\path\to\file',
                     FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))

  def test_mkdirs(self):
    path = os.path.join(self.tmpdir, 't1/t2')
    FileSystems.mkdirs(path)
    self.assertTrue(os.path.isdir(path))

  def test_mkdirs_failed(self):
    path = os.path.join(self.tmpdir, 't1/t2')
    FileSystems.mkdirs(path)
    # Check IOError if existing directory is created
    with self.assertRaises(IOError):
      FileSystems.mkdirs(path)
    with self.assertRaises(IOError):
      FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))

  def test_match_file(self):
    path = os.path.join(self.tmpdir, 'f1')
    open(path, 'a').close()
    # Match files in the temp directory
    result = FileSystems.match([path])[0]
    files = [f.path for f in result.metadata_list]
    self.assertEqual(files, [path])

  def test_match_file_empty(self):
    path = os.path.join(self.tmpdir, 'f2')  # Does not exist
    # Match files in the temp directory
    result = FileSystems.match([path])[0]
    files = [f.path for f in result.metadata_list]
    self.assertEqual(files, [])

  def test_match_file_exception(self):
    # Match files with None so that it throws an exception
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Unable to get the Filesystem') as error:
      FileSystems.match([None])
    self.assertEqual(list(error.exception.exception_details), [None])

  def test_match_glob(self):
    # Previously named test_match_directory and shadowed by the later
    # definition of that name -- it never executed.
    path1 = os.path.join(self.tmpdir, 'f1')
    path2 = os.path.join(self.tmpdir, 'f2')
    open(path1, 'a').close()
    open(path2, 'a').close()
    # Match both the files in the directory
    path = os.path.join(self.tmpdir, '*')
    result = FileSystems.match([path])[0]
    files = [f.path for f in result.metadata_list]
    self.assertEqual(files, [path1, path2])

  def test_match_directory(self):
    result = FileSystems.match([self.tmpdir])[0]
    files = [f.path for f in result.metadata_list]
    self.assertEqual(files, [self.tmpdir])

  def test_copy(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    path2 = os.path.join(self.tmpdir, 'f2')
    with open(path1, 'a') as f:
      f.write('Hello')
    FileSystems.copy([path1], [path2])
    self.assertTrue(filecmp.cmp(path1, path2))

  def test_copy_error(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    path2 = os.path.join(self.tmpdir, 'f2')
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Copy operation failed') as error:
      FileSystems.copy([path1], [path2])
    self.assertEqual(list(error.exception.exception_details.keys()),
                     [(path1, path2)])

  def test_copy_directory(self):
    path_t1 = os.path.join(self.tmpdir, 't1')
    path_t2 = os.path.join(self.tmpdir, 't2')
    FileSystems.mkdirs(path_t1)
    FileSystems.mkdirs(path_t2)
    path1 = os.path.join(path_t1, 'f1')
    path2 = os.path.join(path_t2, 'f1')
    with open(path1, 'a') as f:
      f.write('Hello')
    FileSystems.copy([path_t1], [path_t2])
    self.assertTrue(filecmp.cmp(path1, path2))

  def test_rename(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    path2 = os.path.join(self.tmpdir, 'f2')
    with open(path1, 'a') as f:
      f.write('Hello')
    FileSystems.rename([path1], [path2])
    self.assertTrue(FileSystems.exists(path2))
    self.assertFalse(FileSystems.exists(path1))

  def test_rename_error(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    path2 = os.path.join(self.tmpdir, 'f2')
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Rename operation failed') as error:
      FileSystems.rename([path1], [path2])
    self.assertEqual(list(error.exception.exception_details.keys()),
                     [(path1, path2)])

  def test_rename_directory(self):
    path_t1 = os.path.join(self.tmpdir, 't1')
    path_t2 = os.path.join(self.tmpdir, 't2')
    FileSystems.mkdirs(path_t1)
    path1 = os.path.join(path_t1, 'f1')
    path2 = os.path.join(path_t2, 'f1')
    with open(path1, 'a') as f:
      f.write('Hello')
    FileSystems.rename([path_t1], [path_t2])
    self.assertTrue(FileSystems.exists(path_t2))
    self.assertFalse(FileSystems.exists(path_t1))
    self.assertTrue(FileSystems.exists(path2))
    self.assertFalse(FileSystems.exists(path1))

  def test_exists(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    path2 = os.path.join(self.tmpdir, 'f2')
    with open(path1, 'a') as f:
      f.write('Hello')
    self.assertTrue(FileSystems.exists(path1))
    self.assertFalse(FileSystems.exists(path2))

  def test_delete(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    with open(path1, 'a') as f:
      f.write('Hello')
    self.assertTrue(FileSystems.exists(path1))
    FileSystems.delete([path1])
    self.assertFalse(FileSystems.exists(path1))

  def test_delete_error(self):
    path1 = os.path.join(self.tmpdir, 'f1')
    with self.assertRaisesRegexp(BeamIOError,
                                 r'^Delete operation failed') as error:
      FileSystems.delete([path1])
    self.assertEqual(list(error.exception.exception_details.keys()), [path1])
# Script entry point: run the suite directly with INFO-level logging.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| apache-2.0 |
rbenjamin/namebench | nb_third_party/dns/tsigkeyring.py | 248 | 1658 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A place to store TSIG keys."""
import base64
import dns.name
def from_text(textring):
    """Convert a dictionary containing (textual DNS name, base64 secret) pairs
    into a binary keyring which has (dns.name.Name, binary secret) pairs.
    @rtype: dict"""

    keyring = {}
    for keytext in textring:
        keyname = dns.name.from_text(keytext)
        # b64decode replaces the deprecated base64.decodestring (removed in
        # Python 3.9); both accept the same input, including embedded
        # newlines, so behaviour is unchanged on Python 2 and 3.
        secret = base64.b64decode(textring[keytext])
        keyring[keyname] = secret
    return keyring
def to_text(keyring):
    """Convert a dictionary containing (dns.name.Name, binary secret) pairs
    into a text keyring which has (textual DNS name, base64 secret) pairs.
    @rtype: dict"""

    textring = {}
    for keyname in keyring:
        keytext = dns.name.to_text(keyname)
        # NOTE(review): base64.encodestring was removed in Python 3.9; its
        # modern name is base64.encodebytes (same output, incl. the
        # trailing newline) -- migrate when dropping Python 2 support.
        secret = base64.encodestring(keyring[keyname])
        textring[keytext] = secret
    return textring
| apache-2.0 |
Toshakins/wagtail | wagtail/wagtailimages/migrations/0002_initial_data.py | 13 | 2304 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import VERSION as DJANGO_VERSION
from django.db import migrations
def add_image_permissions_to_admin_groups(apps, schema_editor):
    """Grant the add/change/delete image permissions to the Editors and
    Moderators groups created by wagtailcore's initial data migration."""
    ContentType = apps.get_model('contenttypes.ContentType')
    Permission = apps.get_model('auth.Permission')
    Group = apps.get_model('auth.Group')

    # Get (or create) the content type the image permissions hang off.
    image_content_type, _created = ContentType.objects.get_or_create(
        model='image',
        app_label='wagtailimages',
        defaults={'name': 'image'} if DJANGO_VERSION < (1, 8) else {}
    )

    image_permissions = []
    for action in ('add', 'change', 'delete'):
        permission, _created = Permission.objects.get_or_create(
            content_type=image_content_type,
            codename='%s_image' % action,
            defaults={'name': 'Can %s image' % action}
        )
        image_permissions.append(permission)

    # Assign the permissions to the Editors and Moderators groups.
    for group in Group.objects.filter(name__in=['Editors', 'Moderators']):
        group.permissions.add(*image_permissions)
def remove_image_permissions(apps, schema_editor):
    """Reverse the permission additions by deleting the image permissions
    (the group membership rows are removed by the cascade)."""
    ContentType = apps.get_model('contenttypes.ContentType')
    Permission = apps.get_model('auth.Permission')

    image_content_type = ContentType.objects.get(
        model='image',
        app_label='wagtailimages',
    )
    # Deleting the Permission rows cascades to the Group m2m table.
    Permission.objects.filter(
        content_type=image_content_type,
        codename__in=('add_image', 'change_image', 'delete_image'),
    ).delete()
class Migration(migrations.Migration):
    # Data migration: grant image permissions to the default admin groups;
    # reversible via remove_image_permissions.

    dependencies = [
        ('wagtailimages', '0001_initial'),
        # Need to run wagtailcores initial data migration to make sure the groups are created
        ('wagtailcore', '0002_initial_data'),
    ]
    operations = [
        migrations.RunPython(add_image_permissions_to_admin_groups, remove_image_permissions),
    ]
| bsd-3-clause |
tjanez/ansible | lib/ansible/modules/packaging/os/apt_rpm.py | 25 | 5328 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <evg@altlinux.org>
# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: apt_rpm
short_description: apt_rpm package manager
description:
- Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
version_added: "1.5"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
default: present
choices: [ "absent", "present" ]
update_cache:
description:
- update the package database first C(apt-get update).
required: false
default: no
choices: [ "yes", "no" ]
author: "Evgenii Terechkov (@evgkrsk)"
notes: []
'''
EXAMPLES = '''
# install package foo
- apt_rpm:
pkg: foo
state: present
# remove package foo
- apt_rpm:
pkg: foo
state: absent
# description: remove packages foo and bar
- apt_rpm:
pkg: foo,bar
state: absent
# description: update the package database and install bar (bar will be the updated if a newer version exists)
- apt_rpm:
name: bar
state: present
update_cache: yes
'''
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import sys
APT_PATH = "/usr/bin/apt-get"
RPM_PATH = "/usr/bin/rpm"


def query_package(module, name):
    """Return True if the named package is installed.

    ``rpm -q`` exits 0 when the package is installed and 1 when it is not.
    """
    rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
    # Return the exit-status test directly (same style as
    # query_package_provides) instead of an if/else spelling out True/False.
    return rc == 0
def query_package_provides(module, name):
    """Return True if the package (or any package providing it) is installed.

    Uses ``rpm -q --provides``; exit status 0 means present.
    (The previous comment was copied from query_package and described the
    plain ``rpm -q`` form.)
    """
    rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH,name))
    return rc == 0
def update_package_db(module):
    """Run ``apt-get update``; fail the module if it exits non-zero."""
    returncode, _stdout, stderr = module.run_command("%s update" % APT_PATH)
    if returncode:
        module.fail_json(msg="could not update package db: %s" % stderr)
def remove_packages(module, packages):
    """Remove every installed package in ``packages`` via apt-get.

    Exits the module with changed=True when at least one package was
    removed, changed=False when everything was already absent.  Packages
    are handled one at a time so a failure can name the culprit.
    """
    removed = 0
    for pkg in packages:
        # Nothing to do for packages that are not installed.
        if not query_package(module, pkg):
            continue
        rc, _out, err = module.run_command("%s -y remove %s" % (APT_PATH,pkg))
        if rc != 0:
            module.fail_json(msg="failed to remove %s: %s" % (pkg, err))
        removed += 1
    if removed:
        module.exit_json(changed=True, msg="removed %s package(s)" % removed)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec):
    """Install every package from ``pkgspec`` that is not yet present.

    Exits the module with changed=True on success, changed=False when all
    packages were already present, and fails when the install did not
    actually make every package available.
    """
    # Build the quoted list of packages that still need installing.
    packages = ""
    for package in pkgspec:
        if not query_package_provides(module, package):
            packages += "'%s' " % package
    if len(packages) != 0:
        rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
        # Re-check every requested package afterwards: apt-rpm always has a 0
        # exit code when --force is used, so the exit code alone cannot be
        # trusted.
        installed = True
        # BUGFIX: this previously read `for packages in pkgspec`, which both
        # clobbered the `packages` string used in the messages below and left
        # the loop body re-testing the stale `package` from the loop above,
        # so only one package was ever verified.
        for package in pkgspec:
            if not query_package_provides(module, package):
                installed = False
        if rc or not installed:
            module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
        else:
            module.exit_json(changed=True, msg="%s present(s)" % packages)
    else:
        module.exit_json(changed=False)
def main():
    """Entry point: parse module arguments and dispatch install/remove."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
            update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
            package = dict(aliases=['pkg', 'name'], required=True)))
    # Both binaries are required: apt-get for install/remove, rpm for the
    # installed-package queries.
    if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
        module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
    p = module.params
    if p['update_cache']:
        update_package_db(module)
    # Multiple packages may be passed as one comma-separated string.
    packages = p['package'].split(',')
    if p['state'] in [ 'installed', 'present' ]:
        install_packages(module, packages)
    elif p['state'] in [ 'removed', 'absent' ]:
        remove_packages(module, packages)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
hasteur/g13bot_tools_new | scripts/maintenance/compat2core.py | 1 | 7684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A helper script to convert compat 1.0 scripts to the new core 2.0 framework.
NOTE: Please be aware that this script is not able to convert your codes
completely. It may support you with some automatic replacements and it gives
some warnings and hints for converting. Please refer to the converting guide
README-conversion.txt in the core framework folder and check your codes finally.
The scripts asks for the .py file and converts it to
<scriptname>-core.py in the same directory. The following options are supported:
- warnonly: Do not convert the source but show warning messages. This is good
to check already merged scripts.
usage
to convert a script and show warnings about deprecated methods:
python pwb.py compat2core <scriptname>
to show warnings about deprecated methods:
python pwb.py compat2core <scriptname> -warnonly
"""
#
# (C) xqt, 2014-2017
# (C) Pywikibot team, 2014-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import os
import re
import pywikibot
# be careful with replacement order!
# Each entry is a (pattern, replacement) pair applied with re.sub() in
# sequence by ConvertBot.convert(); later patterns may rely on rewrites
# performed by earlier ones.
replacements = (
    # doc strings
    ('#\r?\n__version__',
     '#\n# Automatically ported from compat branch by compat2core.py script\n'
     '#\n__version__'),
    ('Pywikipedia bot team', 'Pywikibot team'),
    # importing changes
    ('import wikipedia(?: as pywikibot)?', 'import pywikibot'),
    ('(?<!from pywikibot )import (config|pagegenerators)',
     r'from pywikibot import \1'),
    ('(?<!from pywikibot.compat )import query',
     'from pywikibot.compat import query'),
    # remove deprecated libs
    ('import catlib\r?\n', ''),
    ('import userlib\r?\n', ''),
    # change wikipedia to pywikibot, exclude URLs
    (r'(?<!\.)wikipedia\.', u'pywikibot.'),
    # site instance call
    (r'pywikibot\.getSite\s*\(\s*', 'pywikibot.Site('),
    # lang is different from code. We should use code in core
    (r'([Ss])ite.lang(?:uage\(\))?', r'\1ite.code'),
    # change compat library classes to pywikibot intrinsic classes
    (r'catlib\.Category\s*\(\s*', 'pywikibot.Category('),
    (r'catlib\.change_category\s*\((\s*)(?P<article>.+?),\s*(?P<oldcat>.+?),',
     r'\g<article>.change_category(\1\g<oldcat>,'),
    (r'userlib\.User\s*\(\s*', 'pywikibot.User('),
    # change ImagePage to FilePage
    (r'pywikibot\.ImagePage\s*\(\s*', 'pywikibot.FilePage('),
    # deprecated title methods
    (r'\.urlname\s*\(\s*\)', '.title(asUrl=True)'),
    (r'\.urlname\s*\(\s*(?:withNamespace\s*=\s*)?(True|False)+\s*\)',
     r'.title(asUrl=True, withNamespace=\1)'),
    (r'\.titleWithoutNamespace\s*\(\s*\)', '.title(withNamespace=False)'),
    (r'\.sectionFreeTitle\s*\(\s*\)', '.title(withSection=False)'),
    (r'\.aslink\s*\(\s*\)', '.title(asLink=True)'),
    # other deprecated methods
    (r'(?<!site)\.encoding\s*\(\s*\)', '.site.encoding()'),
    (r'\.newimages\s*\(\)', ".logevents(logtype='upload')"),
    (r'\.newimages\s*\(([^)])', r".logevents(logtype='upload', \1"),
    (r'\.getRestrictions\s*\(', '.protection('),
    # new core methods and properties
    (r'\.get\s*\(\s*get_redirect\s*=\s*True\s*\)', '.text'),
    (r'(?:pywikibot|wikipedia)\.verbose', 'config.verbose_output'),
    # stopme() is done by the framework itself
    (r'(\s*)try\:\s*\r?\n\s+main\(\)\s*\r?\n\s*finally\:\s*\r?\n'
     r'\s+pywikibot\.stopme\(\)',
     r'\1main()'),
)
# some warnings which must be changed manually
# Each entry is a (substring, hint) pair; ConvertBot.warning() prints the
# hint for every line of the converted script containing the substring.
warnings = (
    ('pywikibot.setAction(',
     'setAction() no longer works; you must pass an explicit edit summary\n'
     'message to put() or put_async()'),
    ('.removeImage(',
     'Page.removeImage() is deprecated and does not work at core'),
    ('.replaceImage(',
     'Page.replaceImage() is deprecated and does not work at core'),
    ('.getVersionHistory(',
     'Page.getVersionHistory() returns a pywikibot.Timestamp object instead of\n'
     'a MediaWiki one'),
    ('.contributions(',
     'User.contributions() returns a pywikibot.Timestamp object instead of a\n'
     'MediaWiki one'),
    ('.getFileMd5Sum(',
     'FilePage.getFileMd5Sum() is deprecated should be replaced by '
     'FilePage.latest_file_info.sha1'),
    (' wikipedia.',
     '"wikipedia" library has been changed to "pywikibot".'),
    ('from wikipedia import',
     '"wikipedia" library has been changed to "pywikibot". Please find the\n'
     'right way to import your object.'),
    ('query.GetData(',
     'query.GetData() should be replaced by pywikibot.data.api.Request or\n'
     'by a direct site request'),
    ('.verbose',
     'verbose_output need "from pywikibot import config" first'),
    ('templatesWithParams(',
     'the first item of each template info is a Page object of the template,\n'
     'not the title. '
     'Please refer README-conversion.txt and the documentation.'),
)
class ConvertBot(object):

    """Bot that ports a compat script to the core framework."""

    def __init__(self, filename=None, warnonly=False):
        """Remember the source script and whether to only warn."""
        self.source = filename
        self.warnonly = warnonly

    def run(self):
        """Run the conversion (or the warning-only scan)."""
        self.get_source()
        self.get_dest()
        if not self.warnonly:
            self.convert()
        self.warning()

    def get_source(self):
        """Ask for the source script until an existing .py file is found."""
        while True:
            if self.source is None:
                self.source = pywikibot.input(
                    'Please input the .py file to convert '
                    '(no input to leave):')
            if not self.source:
                exit()
            if not self.source.endswith(u'.py'):
                self.source += '.py'
            # Accept the path as given, or fall back to the scripts/ folder.
            candidate = self.source
            if not os.path.exists(candidate):
                candidate = os.path.join('scripts', candidate)
            if os.path.exists(candidate):
                self.source = candidate
                return
            pywikibot.output(u'%s does not exist. Please retry.' % candidate)
            self.source = None

    def get_dest(self):
        """Derive the destination name and confirm it with the user."""
        base, extension = self.source.rsplit(u'.', 1)
        self.dest = u'%s-core.%s' % (base, extension)
        if self.warnonly:
            return
        if not pywikibot.input_yn(u'Destination file is %s.' % self.dest,
                                  default=True, automatic_quit=False):
            pywikibot.output('Quitting...')
            exit()

    def convert(self):
        """Apply every regex replacement and write the converted script."""
        with codecs.open(self.source, 'r', 'utf-8') as infile:
            text = infile.read()
        for pattern, repl in replacements:
            text = re.sub(pattern, repl, text)
        with codecs.open(self.dest, 'w', 'utf-8') as outfile:
            outfile.write(text)

    def warning(self):
        """Print a hint for every line matching a known warning pattern."""
        path = self.source if self.warnonly else self.dest
        with codecs.open(path, 'r', 'utf-8') as handle:
            for lineno, line in enumerate(handle.readlines(), start=1):
                for token, hint in warnings:
                    if token in line:
                        pywikibot.warning(
                            'line {0}: {1}>>> {2}\n'.format(lineno, line, hint))
def main():
    """Parse command line arguments and run the conversion bot."""
    script = None
    warn_only = False
    for option in pywikibot.handleArgs():
        if option.startswith('-warnonly'):
            warn_only = True
        elif option.startswith('-'):
            # Any other dashed option is unknown.
            pywikibot.warning(option + ' is not supported')
        else:
            script = option
    ConvertBot(script, warn_only).run()
if __name__ == "__main__":
    # Standalone invocation: no wiki access is needed for converting files.
    pywikibot.stopme() # we do not work on any site
    main()
| mit |
osinstom/onos | tools/test/topos/default.py | 6 | 6825 | #!/usr/bin/env python
"""
"""
from mininet.topo import Topo
class AttMplsTopo( Topo ):
    "Internet Topology Zoo Specimen."

    # City code -> numeric id used for its switch (sN) and host (hN), with
    # the original latitude/longitude kept for reference.  Declaration order
    # matters: nodes and links are created in exactly this sequence, which
    # fixes Mininet's port numbering.
    NODE_IDS = (
        ('NY54', 25),  # 40.728270, -73.994483
        ('CMBR', 1),   # 42.373730, -71.109734
        ('CHCG', 2),   # 41.877461, -87.642892
        ('CLEV', 3),   # 41.498928, -81.695217
        ('RLGH', 4),   # 35.780150, -78.644026
        ('ATLN', 5),   # 33.749017, -84.394168
        ('PHLA', 6),   # 39.952906, -75.172278
        ('WASH', 7),   # 38.906696, -77.035509
        ('NSVL', 8),   # 36.166410, -86.787305
        ('STLS', 9),   # 38.626418, -90.198143
        ('NWOR', 10),  # 29.951475, -90.078434
        ('HSTN', 11),  # 29.763249, -95.368332
        ('SNAN', 12),  # 29.424331, -98.491745
        ('DLLS', 13),  # 32.777665, -96.802064
        ('ORLD', 14),  # 28.538641, -81.381110
        ('DNVR', 15),  # 39.736623, -104.984887
        ('KSCY', 16),  # 39.100725, -94.581228
        ('SNFN', 17),  # 37.779751, -122.409791
        ('SCRM', 18),  # 38.581001, -121.497844
        ('PTLD', 19),  # 45.523317, -122.677768
        ('STTL', 20),  # 47.607326, -122.331786
        ('SLKC', 21),  # 40.759577, -111.895079
        ('LA03', 22),  # 34.056346, -118.235951
        ('SNDG', 23),  # 32.714564, -117.153528
        ('PHNX', 24),  # 33.448289, -112.076299
    )

    # Inter-switch links; repeated pairs are parallel links in the topology.
    CORE_LINKS = (
        ('NY54', 'CMBR'), ('NY54', 'CMBR'), ('NY54', 'CMBR'),
        ('NY54', 'CHCG'),
        ('NY54', 'PHLA'), ('NY54', 'PHLA'),
        ('NY54', 'WASH'),
        ('CMBR', 'PHLA'),
        ('CHCG', 'CLEV'), ('CHCG', 'PHLA'), ('CHCG', 'STLS'),
        ('CHCG', 'DNVR'), ('CHCG', 'KSCY'), ('CHCG', 'KSCY'),
        ('CHCG', 'SNFN'), ('CHCG', 'STTL'), ('CHCG', 'SLKC'),
        ('CLEV', 'NSVL'), ('CLEV', 'STLS'), ('CLEV', 'PHLA'),
        ('RLGH', 'ATLN'), ('RLGH', 'WASH'),
        ('ATLN', 'WASH'), ('ATLN', 'NSVL'), ('ATLN', 'STLS'),
        ('ATLN', 'DLLS'), ('ATLN', 'DLLS'), ('ATLN', 'DLLS'),
        ('ATLN', 'ORLD'),
        ('PHLA', 'WASH'),
        ('NSVL', 'STLS'), ('NSVL', 'DLLS'),
        ('STLS', 'DLLS'), ('STLS', 'KSCY'), ('STLS', 'LA03'),
        ('NWOR', 'HSTN'), ('NWOR', 'DLLS'), ('NWOR', 'ORLD'),
        ('HSTN', 'SNAN'), ('HSTN', 'DLLS'), ('HSTN', 'ORLD'),
        ('SNAN', 'PHNX'), ('SNAN', 'DLLS'),
        ('DLLS', 'DNVR'), ('DLLS', 'DNVR'),
        ('DLLS', 'KSCY'), ('DLLS', 'KSCY'),
        ('DLLS', 'SNFN'),
        ('DLLS', 'LA03'), ('DLLS', 'LA03'),
        ('DNVR', 'KSCY'),
        ('DNVR', 'SNFN'), ('DNVR', 'SNFN'),
        ('DNVR', 'SLKC'),
        ('KSCY', 'SNFN'),
        ('SNFN', 'SCRM'), ('SNFN', 'PTLD'), ('SNFN', 'STTL'),
        ('SNFN', 'SLKC'),
        ('SNFN', 'LA03'), ('SNFN', 'LA03'), ('SNFN', 'LA03'),
        ('SCRM', 'SLKC'),
        ('PTLD', 'STTL'),
        ('SLKC', 'LA03'),
        ('LA03', 'SNDG'), ('LA03', 'SNDG'),
        ('LA03', 'PHNX'), ('LA03', 'PHNX'),
        ('SNDG', 'PHNX'),
    )

    def addSwitch( self, name, **opts ):
        """Add a switch, defaulting its protocol to OpenFlow 1.3."""
        kwargs = { 'protocols' : 'OpenFlow13' }
        kwargs.update( opts )
        return super( AttMplsTopo, self ).addSwitch( name, **kwargs )

    def __init__( self ):
        "Create a topology."
        Topo.__init__( self )
        # All switches first, then all hosts, in declaration order
        # (mirrors the original hand-written construction sequence).
        switches = {}
        for city, num in self.NODE_IDS:
            switches[ city ] = self.addSwitch( 's%d' % num )
        hosts = {}
        for city, num in self.NODE_IDS:
            hosts[ city ] = self.addHost( 'h%d' % num )
        # Attach each host to its city's switch.
        for city, _num in self.NODE_IDS:
            self.addLink( switches[ city ], hosts[ city ] )
        # Wire up the backbone.
        for src, dst in self.CORE_LINKS:
            self.addLink( switches[ src ], switches[ dst ] )
# Mininet `--topo` registry: `mn --custom default.py --topo att` builds this.
topos = { 'att': ( lambda: AttMplsTopo() ) }
if __name__ == '__main__':
    from onosnet import run
    run( AttMplsTopo() )
| apache-2.0 |
shyamjvs/test-infra | jenkins/bootstrap.py | 2 | 43714 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Bootstraps starting a test job.
The following should already be done:
git checkout http://k8s.io/test-infra
cd $WORKSPACE
test-infra/jenkins/bootstrap.py <--repo=R || --bare> <--job=J> <--pull=P || --branch=B>
The bootstrapper now does the following:
# Note start time
# check out repoes defined in --repo
# note job started
# call runner defined in $JOB.json
# upload artifacts (this will change later)
# upload build-log.txt
# note job ended
The contract with the runner is as follows:
* Runner must exit non-zero if job fails for any reason.
"""
import argparse
import contextlib
import json
import logging
import os
import pipes
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
ORIG_CWD = os.getcwd() # Checkout changes cwd
def read_all(end, stream, append):
    """Drain buffered lines from stream, passing each (sans newline) to append.

    Returns True when EOF was reached, False when the buffer emptied (more
    data may arrive later) or the `end` deadline passed first.
    """
    while True:
        if end and time.time() >= end:
            return False  # Time expired
        line = stream.readline()
        if not line:
            return True  # Read everything
        # The last line of a file may lack a trailing newline.
        append(line.rstrip('\n'))
        # Stop once nothing more is waiting on the descriptor.
        readable, _, _ = select.select([stream.fileno()], [], [], 0.1)
        if not readable:
            return False  # Cleared buffer but not at the end
def elapsed(since):
    """Return how many minutes have passed since `since` (epoch seconds)."""
    seconds = time.time() - since
    return seconds / 60
def terminate(end, proc, kill):
    """Terminate (or kill) proc once the `end` deadline has passed.

    Returns True when a signal was sent, False when no deadline was set or
    it has not expired yet.
    """
    deadline_passed = bool(end) and time.time() > end
    if not deadline_passed:
        return False
    if not kill:
        logging.info(
            'Terminate %d on timeout', proc.pid)
        proc.terminate()
        return True
    # The process survived SIGTERM earlier; take down its whole group.
    pgid = os.getpgid(proc.pid)
    logging.info(
        'Kill %d and process group %d', proc.pid, pgid)
    os.killpg(pgid, signal.SIGKILL)
    proc.kill()
    return True
def _call(end, cmd, stdin=None, check=True, output=None, log_failures=True, env=None): # pylint: disable=too-many-locals
    """Start a subprocess and stream its output until it exits.

    Args:
        end: absolute deadline (epoch seconds), or falsy for no timeout.
        cmd: argv list to execute.
        stdin: optional bytes fed to the child's stdin.
        check: raise CalledProcessError on non-zero exit when True.
        output: when truthy, capture stdout and return it instead of logging it.
        log_failures: log an error line on non-zero exit.
        env: optional environment mapping for the child.

    Returns:
        Captured stdout joined into one string when `output` is truthy,
        otherwise a falsy value.

    Raises:
        subprocess.CalledProcessError: on non-zero exit while `check` is True.
    """
    logging.info('Call: %s', ' '.join(pipes.quote(c) for c in cmd))
    begin = time.time()
    if end:
        end = max(end, time.time() + 60) # Allow at least 60s per command
    # New session (os.setsid) so terminate() can signal the whole group.
    proc = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE if stdin is not None else None,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        preexec_fn=os.setsid,
        env=env,
    )
    if stdin:
        proc.stdin.write(stdin)
        proc.stdin.close()
    out = []
    code = None
    timeout = False
    # Map each pipe fd to (stream, sink): stderr always goes to the log,
    # stdout is accumulated when `output` was requested.
    reads = {
        proc.stderr.fileno(): (proc.stderr, logging.warning),
        proc.stdout.fileno(): (
            proc.stdout, (out.append if output else logging.info)),
    }
    while reads:
        if terminate(end, proc, timeout):
            if timeout: # We killed everything
                break
            # Give subprocess some cleanup time before killing.
            end = time.time() + 15 * 60
            timeout = True
        ret = select.select(reads, [], [], 0.1)
        for fdesc in ret[0]:
            # read_all() returns True at EOF; stop watching that pipe then.
            if read_all(end, *reads[fdesc]):
                reads.pop(fdesc)
        if not ret[0] and proc.poll() is not None:
            break # process exited without closing pipes (timeout?)
    code = proc.wait()
    if timeout:
        code = code or 124  # 124 is the conventional `timeout` exit status
    if code and log_failures:
        logging.error('Command failed')
    logging.info(
        'process %d exited with code %d after %.1fm',
        proc.pid, code, elapsed(begin))
    out.append('')
    lines = output and '\n'.join(out)
    if check and code:
        raise subprocess.CalledProcessError(code, cmd, lines)
    return lines
def ref_has_shas(ref):
    """Determine if a reference specifies shas (contains ':')."""
    # Non-string refs (e.g. a bare PR number) can never carry shas.
    if not isinstance(ref, basestring):
        return False
    return ':' in ref
def pull_numbers(pull):
    """Turn a pull reference list into a list of PR numbers to merge."""
    if not ref_has_shas(pull):
        return [str(pull)]
    # Keep each entry's number, dropping the first entry (the base branch).
    return [entry.split(':')[0] for entry in pull.split(',')][1:]
def pull_ref(pull):
    """Turn a PR number or a list of refs into refs to fetch and check out.

    Returns (refs, checkouts): the git refspecs to fetch and the matching
    commits/refs to check out or merge, in the same order.
    """
    # Simple case: a single PR number means "fetch its merge ref".
    if isinstance(pull, int) or ',' not in pull:
        return ['+refs/pull/%d/merge' % int(pull)], ['FETCH_HEAD']
    refs = []
    checkouts = []
    for item in pull.split(','):
        explicit_ref = None
        if ':' in item:  # master:abcd or 1234:abcd or 1234:abcd:ref/for/pr
            pieces = item.split(':')
            name = pieces[0]
            sha = pieces[1]
            if len(pieces) > 2:
                explicit_ref = pieces[2]
        elif not refs:  # first entry without a sha: the base branch
            name, sha = item, 'FETCH_HEAD'
        else:
            name = item
            sha = 'refs/pr/%s' % item
        checkouts.append(sha)
        if not refs:  # First ref should be branch to merge into
            refs.append(name)
        elif explicit_ref:  # explicit change refs
            refs.append(explicit_ref)
        else:  # PR numbers
            num = int(name)
            refs.append('+refs/pull/%d/head:refs/pr/%d' % (num, num))
    return refs, checkouts
def branch_ref(branch):
    """Split 'branch:sha' into ([branch], [sha]); default sha is FETCH_HEAD."""
    if not ref_has_shas(branch):
        return [branch], ['FETCH_HEAD']
    pieces = branch.split(':')
    return [pieces[0]], [pieces[1]]
def repository(repo, ssh):
    """Return the clone url (https, or ssh when `ssh` is truthy) for repo."""
    # Vanity prefixes map onto their actual GitHub organizations.
    aliases = (
        ('k8s.io/', 'github.com/kubernetes/'),
        ('sigs.k8s.io/', 'github.com/kubernetes-sigs/'),
        ('istio.io/', 'github.com/istio/'),
    )
    for prefix, target in aliases:
        if repo.startswith(prefix):
            repo = target + repo[len(prefix):]
            break
    if not ssh:
        return 'https://%s' % repo
    # ssh urls separate host and path with ':' instead of the first '/'.
    if ":" not in repo:
        pieces = repo.split('/', 1)
        repo = '%s:%s' % (pieces[0], pieces[1])
    return 'git@%s' % repo
def random_sleep(attempt):
    """Sleep attempt**2 seconds plus a random fraction of a second.

    NOTE: this is quadratic backoff (`attempt ** 2`), not the exponential
    `2**attempt` the previous docstring claimed.
    """
    time.sleep(random.random() + attempt ** 2)
def auth_google_gerrit(git, call):
    """authenticate to foo.googlesource.com"""
    # Clone Google's helper tools and start the cookie auth daemon, which
    # keeps a fresh auth cookie available for *.googlesource.com fetches.
    call([git, 'clone', 'https://gerrit.googlesource.com/gcompute-tools'])
    call(['./gcompute-tools/git-cookie-authdaemon'])
def commit_date(git, commit, call):
    """Return the unix timestamp (as a string) of `commit`, or None on failure."""
    cmd = [git, 'show', '-s', '--format=format:%ct', commit]
    try:
        return call(cmd, output=True, log_failures=False)
    except subprocess.CalledProcessError:
        logging.warning('Unable to print commit date for %s', commit)
        return None
def checkout(call, repo, repo_path, branch, pull, ssh='', git_cache='', clean=False):
    """Fetch and checkout the repository at the specified branch/pull.

    Note that repo and repo_path should usually be the same, but repo_path can
    be set to a different relative path where repo should be checked out.

    Args:
        call: a _call-style runner used for every git invocation.
        repo: repository spec (e.g. k8s.io/kubernetes).
        repo_path: relative path to check the repo out into.
        branch: branch (or 'branch:sha') to fetch — mutually exclusive with pull.
        pull: PR number or comma-separated ref list — mutually exclusive with branch.
        ssh: when truthy, clone over ssh instead of https.
        git_cache: optional dir holding a shared --separate-git-dir cache.
        clean: when True, hard-reset and clean the working tree first.

    Raises:
        ValueError: unless exactly one of branch/pull is given.

    Side effect: chdirs into repo_path.
    """
    # pylint: disable=too-many-locals,too-many-branches
    if bool(branch) == bool(pull):
        raise ValueError('Must specify one of --branch or --pull')
    if pull:
        refs, checkouts = pull_ref(pull)
    else:
        refs, checkouts = branch_ref(branch)
    git = 'git'
    # auth to google gerrit instance
    # TODO(krzyzacy): when migrate to init container we'll make a gerrit
    # checkout image and move this logic there
    if '.googlesource.com' in repo:
        auth_google_gerrit(git, call)
    if git_cache:
        cache_dir = '%s/%s' % (git_cache, repo)
        try:
            os.makedirs(cache_dir)
        except OSError:
            # Cache dir already exists (or is unreadable); git init will tell.
            pass
        call([git, 'init', repo_path, '--separate-git-dir=%s' % cache_dir])
        # Remove a stale lock left behind by an interrupted earlier run.
        call(['rm', '-f', '%s/index.lock' % cache_dir])
    else:
        call([git, 'init', repo_path])
    os.chdir(repo_path)
    if clean:
        call([git, 'clean', '-dfx'])
        call([git, 'reset', '--hard'])
    # To make a merge commit, a user needs to be set. It's okay to use a dummy
    # user here, since we're not exporting the history.
    call([git, 'config', '--local', 'user.name', 'K8S Bootstrap'])
    call([git, 'config', '--local', 'user.email', 'k8s_bootstrap@localhost'])
    retries = 3
    for attempt in range(retries):
        try:
            call([git, 'fetch', '--quiet', '--tags', repository(repo, ssh)] + refs)
            break
        except subprocess.CalledProcessError as cpe:
            if attempt >= retries - 1:
                raise
            # 128 covers transient network/remote failures; retry those only.
            if cpe.returncode != 128:
                raise
            logging.warning('git fetch failed')
            random_sleep(attempt)
    call([git, 'checkout', '-B', 'test', checkouts[0]])
    # Lie about the date in merge commits: use sequential seconds after the
    # commit date of the tip of the parent branch we're checking into.
    merge_date = int(commit_date(git, 'HEAD', call) or time.time())
    # NOTE(review): GIT_AUTHOR_DATE_ENV / GIT_COMMITTER_DATE_ENV are module
    # constants defined elsewhere in this file — presumably the standard
    # GIT_AUTHOR_DATE / GIT_COMMITTER_DATE variable names; confirm there.
    git_merge_env = os.environ.copy()
    for ref, head in zip(refs, checkouts)[1:]:
        merge_date += 1
        git_merge_env[GIT_AUTHOR_DATE_ENV] = str(merge_date)
        git_merge_env[GIT_COMMITTER_DATE_ENV] = str(merge_date)
        call(['git', 'merge', '--no-ff', '-m', 'Merge %s' % ref, head],
             env=git_merge_env)
def repos_dict(repos):
    """Flatten {repo: (branch, pull)} into {repo: branch-or-pull}."""
    flattened = {}
    for name, (branch, pull) in repos.items():
        flattened[name] = branch or pull
    return flattened
def start(gsutil, paths, stamp, node_name, version, repos):
    """Construct and upload started.json.

    Args:
        gsutil: a GSUtil instance used for the uploads.
        paths: a Paths instance naming the remote destinations.
        stamp: build start time (epoch seconds).
        node_name: name of the node running the build.
        version: repo version string, if known.
        repos: mapping of repo -> (branch, pull) with a `.main` attribute.
    """
    data = {
        'timestamp': int(stamp),
        'node': node_name,
    }
    if version:
        data['repo-version'] = version
        data['version'] = version # TODO(fejta): retire
    if repos:
        # Record the pull spec of the main repo when it pins explicit shas.
        pull = repos[repos.main]
        if ref_has_shas(pull[1]):
            data['pull'] = pull[1]
        data['repos'] = repos_dict(repos)
    if POD_ENV in os.environ:
        data['metadata'] = {'pod': os.environ[POD_ENV]}
    gsutil.upload_json(paths.started, data)
    # Upload a link to the build path in the directory
    if paths.pr_build_link:
        gsutil.upload_text(
            paths.pr_build_link,
            paths.pr_path,
            additional_headers=['-h', 'x-goog-meta-link: %s' % paths.pr_path]
        )
class GSUtil(object):
    """A helper class for making gsutil commands."""

    # Binary name; resolved via PATH by the runner passed to __init__.
    gsutil = 'gsutil'

    def __init__(self, call):
        # `call` is a _call-style runner: call(cmd, output=..., ...).
        self.call = call

    def stat(self, path):
        """Return metadata about the object, such as generation."""
        cmd = [self.gsutil, 'stat', path]
        return self.call(cmd, output=True, log_failures=False)

    def ls(self, path):
        """List a bucket or subdir."""
        cmd = [self.gsutil, 'ls', path]
        return self.call(cmd, output=True)

    def upload_json(self, path, jdict, generation=None):
        """Upload the dictionary object to path.

        When `generation` is given, the copy is conditional on the remote
        object still having that generation (optimistic concurrency).
        """
        if generation is not None: # generation==0 means object does not exist
            gen = ['-h', 'x-goog-if-generation-match:%s' % generation]
        else:
            gen = []
        with tempfile.NamedTemporaryFile(prefix='gsutil_') as fp:
            fp.write(json.dumps(jdict, indent=2))
            fp.flush()
            cmd = [
                self.gsutil, '-q',
                '-h', 'Content-Type:application/json'] + gen + [
                'cp', fp.name, path]
            self.call(cmd)

    def copy_file(self, dest, orig):
        """Copy the file to the specified path using compressed encoding."""
        cmd = [self.gsutil, '-q', 'cp', '-Z', orig, dest]
        self.call(cmd)

    def upload_text(self, path, txt, additional_headers=None, cached=True):
        """Copy the text to path, optionally disabling caching."""
        headers = ['-h', 'Content-Type:text/plain']
        if not cached:
            headers += ['-h', 'Cache-Control:private, max-age=0, no-transform']
        if additional_headers:
            headers += additional_headers
        with tempfile.NamedTemporaryFile(prefix='gsutil_') as fp:
            fp.write(txt)
            fp.flush()
            cmd = [self.gsutil, '-q'] + headers + ['cp', fp.name, path]
            self.call(cmd)

    def cat(self, path, generation):
        """Return contents of path#generation"""
        cmd = [self.gsutil, '-q', 'cat', '%s#%s' % (path, generation)]
        return self.call(cmd, output=True)

    def upload_artifacts(self, gsutil, path, artifacts):
        """Upload artifacts to the specified path.

        NOTE(review): this method takes both `self` and a `gsutil` argument;
        call sites pass the same instance twice (gsutil.upload_artifacts(
        gsutil, ...)). Looks redundant — confirm before cleaning up.
        """
        # Upload artifacts
        if not os.path.isdir(artifacts):
            logging.warning('Artifacts dir %s is missing.', artifacts)
            return
        original_artifacts = artifacts
        try:
            # If remote path exists, it will create .../_artifacts subdir instead
            gsutil.ls(path)
            # Success means remote path exists
            remote_base = os.path.basename(path)
            local_base = os.path.basename(artifacts)
            if remote_base != local_base:
                # if basename are different, need to copy things over first.
                localpath = artifacts.replace(local_base, remote_base)
                os.rename(artifacts, localpath)
                artifacts = localpath
                path = path[:-len(remote_base + '/')]
        except subprocess.CalledProcessError:
            logging.warning('Remote dir %s not exist yet', path)
        # -m parallelizes, magicfile sniffs content types, -z compresses text.
        cmd = [
            self.gsutil, '-m', '-q',
            '-o', 'GSUtil:use_magicfile=True',
            'cp', '-r', '-c', '-z', 'log,txt,xml',
            artifacts, path,
        ]
        self.call(cmd)
        # rename the artifacts dir back
        # other places still references the original artifacts dir
        if original_artifacts != artifacts:
            os.rename(artifacts, original_artifacts)
def append_result(gsutil, path, build, version, passed):
    """Download a json list and append metadata about this build to it.

    Uses gcs generation matching as optimistic locking: the upload only
    succeeds if nobody else updated the cache in between, retrying with
    backoff for up to five minutes.
    """
    # TODO(fejta): delete the clone of this logic in upload-to-gcs.sh
    # (this is update_job_result_cache)
    end = time.time() + 300 # try for up to five minutes
    errors = 0
    while time.time() < end:
        if errors:
            random_sleep(min(errors, 3))
        try:
            out = gsutil.stat(path)
            gen = re.search(r'Generation:\s+(\d+)', out).group(1)
        except subprocess.CalledProcessError:
            gen = 0  # object does not exist yet
        if gen:
            try:
                cache = json.loads(gsutil.cat(path, gen))
                if not isinstance(cache, list):
                    raise ValueError(cache)
            except ValueError as exc:
                # Corrupt cache: start over with an empty list.
                logging.warning('Failed to decode JSON: %s', exc)
                cache = []
            except subprocess.CalledProcessError: # gen doesn't exist
                errors += 1
                continue
        else:
            cache = []
        cache.append({
            'version': version, # TODO(fejta): retire
            'job-version': version,
            'buildnumber': build,
            'passed': bool(passed),
            'result': 'SUCCESS' if passed else 'FAILURE',
        })
        # Bound the cache to the most recent 300 results.
        cache = cache[-300:]
        try:
            gsutil.upload_json(path, cache, generation=gen)
            return
        except subprocess.CalledProcessError:
            # Lost the race against a concurrent writer; retry.
            logging.warning('Failed to append to %s#%s', path, gen)
            errors += 1
def metadata(repos, artifacts, call):
    """Return metadata associated for the build, including inside artifacts.

    Reads artifacts/metadata.json when present, then augments it with the
    repo list, the pod name, and the current repo/test-infra commits.
    """
    path = os.path.join(artifacts or '', 'metadata.json')
    meta = None
    if os.path.isfile(path):
        try:
            with open(path) as fp:
                meta = json.loads(fp.read())
        except (IOError, ValueError):
            logging.warning('Failed to open %s', path)
    else:
        logging.warning('metadata path %s does not exist', path)
    if not meta or not isinstance(meta, dict):
        logging.warning(
            'metadata not found or invalid, init with empty metadata')
        meta = {}
    if repos:
        meta['repo'] = repos.main
        meta['repos'] = repos_dict(repos)
    if POD_ENV in os.environ:
        # HARDEN against metadata only being read from finished.
        meta['pod'] = os.environ[POD_ENV]
    # Record the checked-out repo's HEAD commit (cwd is the repo here).
    try:
        commit = call(['git', 'rev-parse', 'HEAD'], output=True)
        if commit:
            meta['repo-commit'] = commit.strip()
    except subprocess.CalledProcessError:
        pass
    # Temporarily switch to the test-infra checkout to record its commit too.
    cwd = os.getcwd()
    os.chdir(test_infra('.'))
    try:
        commit = call(['git', 'rev-parse', 'HEAD'], output=True)
        if commit:
            meta['infra-commit'] = commit.strip()[:9]
    except subprocess.CalledProcessError:
        pass
    os.chdir(cwd)
    return meta
def finish(gsutil, paths, success, artifacts, build, version, repos, call):
    """Upload artifacts, result caches and finished.json for this build.

    Args:
        gsutil: a GSUtil instance used for all uploads.
        paths: a Paths instance.
        success: the build passed if true.
        artifacts: a dir containing artifacts to upload.
        build: identifier of this build.
        version: identifies what version of the code the build tested.
        repos: the target repos mapping (passed through to metadata()).
        call: a _call-style runner used by metadata().
    """
    # Only upload when the artifacts dir actually contains files.
    if os.path.isdir(artifacts) and any(f for _, _, f in os.walk(artifacts)):
        try:
            gsutil.upload_artifacts(gsutil, paths.artifacts, artifacts)
        except subprocess.CalledProcessError:
            logging.warning('Failed to upload artifacts')
    else:
        logging.warning('Missing local artifacts : %s', artifacts)
    meta = metadata(repos, artifacts, call)
    if not version:
        version = meta.get('job-version')
    if not version: # TODO(fejta): retire
        version = meta.get('version')
    # github.com/kubernetes/release/find_green_build depends on append_result()
    # TODO(fejta): reconsider whether this is how we want to solve this problem.
    append_result(gsutil, paths.result_cache, build, version, success)
    if paths.pr_result_cache:
        append_result(gsutil, paths.pr_result_cache, build, version, success)
    data = {
        # TODO(fejta): update utils.go in contrib to accept a float
        'timestamp': int(time.time()),
        'result': 'SUCCESS' if success else 'FAILURE',
        'passed': bool(success),
        'metadata': meta,
    }
    if version:
        data['job-version'] = version
        data['version'] = version # TODO(fejta): retire
    gsutil.upload_json(paths.finished, data)
    # Upload the latest build for the job.
    # Do this last, since other tools expect the rest of the data to be
    # published when this file is created.
    for path in {paths.latest, paths.pr_latest}:
        if path:
            try:
                gsutil.upload_text(path, str(build), cached=False)
            except subprocess.CalledProcessError:
                logging.warning('Failed to update %s', path)
def test_infra(*paths):
    """Return path relative to root of test-infra repo."""
    # ORIG_CWD was captured at import time because checkout() chdirs later,
    # so relative resolution of __file__ stays correct.
    return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def node():
    """Return the name of the node running the build.

    Caches the answer in os.environ[NODE_ENV]; NODE_ENV/POD_ENV are module
    constants defined elsewhere in this file.
    """
    # TODO(fejta): jenkins sets the node name and our infra expect this value.
    # TODO(fejta): Consider doing something different here.
    if NODE_ENV not in os.environ:
        host = socket.gethostname().split('.')[0]
        try:
            # Try reading the name of the VM we're running on, using the
            # metadata server.
            os.environ[NODE_ENV] = urllib2.urlopen(urllib2.Request(
                'http://169.254.169.254/computeMetadata/v1/instance/name',
                headers={'Metadata-Flavor': 'Google'})).read()
            os.environ[POD_ENV] = host # We also want to log this.
        except IOError: # Fallback.
            os.environ[NODE_ENV] = host
    return os.environ[NODE_ENV]
def find_version(call):
    """Determine and return the version of the build."""
    # TODO(fejta): once job-version is functional switch this to
    # git rev-parse [--short=N] HEAD^{commit}
    # e2e tests which download kubernetes drop a plain 'version' file.
    if os.path.isfile('version'):
        with open('version') as handle:
            return handle.read().strip()
    # Otherwise ask the kubernetes version library, when present.
    version_script = 'hack/lib/version.sh'
    if not os.path.isfile(version_script):
        return 'unknown'
    return call([
        'bash', '-c', (
"""
set -o errexit
set -o nounset
export KUBE_ROOT=.
source %s
kube::version::get_version_vars
echo $KUBE_GIT_VERSION
""" % version_script)
    ], output=True).strip()
class Paths(object):  # pylint: disable=too-many-instance-attributes,too-few-public-methods
    """Links to remote gcs-paths for uploading results."""
    def __init__(  # pylint: disable=too-many-arguments
            self,
            artifacts,  # artifacts folder (in build)
            build_log,  # build-log.txt (in build)
            pr_path,  # path to build
            finished,  # finished.json (metadata from end of build)
            latest,  # latest-build.txt (in job)
            pr_build_link,  # file containng pr_path (in job directory)
            pr_latest,  # latest-build.txt (in pr job)
            pr_result_cache,  # jobResultsCache.json (in pr job)
            result_cache,  # jobResultsCache.json (cache of latest results in job)
            started,  # started.json (metadata from start of build)
    ):
        # Plain record type: store every link verbatim as an attribute.
        self.__dict__.update(
            artifacts=artifacts,
            build_log=build_log,
            pr_path=pr_path,
            finished=finished,
            latest=latest,
            pr_build_link=pr_build_link,
            pr_latest=pr_latest,
            pr_result_cache=pr_result_cache,
            result_cache=result_cache,
            started=started,
        )
def ci_paths(base, job, build):
    """Return a Paths() instance for a continuous build."""
    # CI layout: <base>/<job>/<build>/... with job-level caches one level up.
    job_dir = os.path.join(base, job)
    build_dir = os.path.join(job_dir, build)
    return Paths(
        artifacts=os.path.join(build_dir, 'artifacts'),
        build_log=os.path.join(build_dir, 'build-log.txt'),
        pr_path=None,
        finished=os.path.join(build_dir, 'finished.json'),
        latest=os.path.join(job_dir, 'latest-build.txt'),
        pr_build_link=None,
        pr_latest=None,
        pr_result_cache=None,
        result_cache=os.path.join(job_dir, 'jobResultsCache.json'),
        started=os.path.join(build_dir, 'started.json'),
    )
def pr_paths(base, repos, job, build):
    """Return a Paths() instance for a PR."""
    if not repos:
        raise ValueError('repos is empty')
    repo = repos.main
    pull = str(repos[repo][1])
    # kubernetes proper lives at the top level of <base>/pull; all other
    # repos are namespaced under a prefix derived from the repo name.
    if repo in ('k8s.io/kubernetes', 'kubernetes/kubernetes'):
        prefix = ''
    elif repo.startswith('k8s.io/'):
        prefix = repo[len('k8s.io/'):]
    elif repo.startswith('kubernetes/'):
        prefix = repo[len('kubernetes/'):]
    elif repo.startswith('github.com/'):
        prefix = repo[len('github.com/'):].replace('/', '_')
    else:
        prefix = repo.replace('/', '_')
    # Batch merges are those with more than one PR specified.
    pr_nums = pull_numbers(pull)
    leaf = 'batch' if len(pr_nums) > 1 else pr_nums[0]
    pull = os.path.join(prefix, leaf)
    pr_path = os.path.join(base, 'pull', pull, job, build)
    directory = os.path.join(base, 'directory', job)
    pr_job = os.path.join(base, 'pull', pull, job)
    return Paths(
        artifacts=os.path.join(pr_path, 'artifacts'),
        build_log=os.path.join(pr_path, 'build-log.txt'),
        pr_path=pr_path,
        finished=os.path.join(pr_path, 'finished.json'),
        latest=os.path.join(directory, 'latest-build.txt'),
        pr_build_link=os.path.join(directory, '%s.txt' % build),
        pr_latest=os.path.join(pr_job, 'latest-build.txt'),
        pr_result_cache=os.path.join(pr_job, 'jobResultsCache.json'),
        result_cache=os.path.join(directory, 'jobResultsCache.json'),
        started=os.path.join(pr_path, 'started.json'),
    )
# Names of environment variables used to communicate with Jenkins, the
# jobs bootstrap runs, and the surrounding infrastructure, plus the
# gubernator result-viewer URL prefix.
BUILD_ENV = 'BUILD_ID'
BOOTSTRAP_ENV = 'BOOTSTRAP_MIGRATION'
CLOUDSDK_ENV = 'CLOUDSDK_CONFIG'
GCE_KEY_ENV = 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE'
GUBERNATOR = 'https://gubernator.k8s.io/build'
HOME_ENV = 'HOME'
JENKINS_HOME_ENV = 'JENKINS_HOME'
K8S_ENV = 'KUBERNETES_SERVICE_HOST'
JOB_ENV = 'JOB_NAME'
NODE_ENV = 'NODE_NAME'
POD_ENV = 'POD_NAME'
SERVICE_ACCOUNT_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
WORKSPACE_ENV = 'WORKSPACE'
GCS_ARTIFACTS_ENV = 'GCS_ARTIFACTS_DIR'
IMAGE_NAME_ENV = 'IMAGE'
GIT_AUTHOR_DATE_ENV = 'GIT_AUTHOR_DATE'
GIT_COMMITTER_DATE_ENV = 'GIT_COMMITTER_DATE'
SOURCE_DATE_EPOCH_ENV = 'SOURCE_DATE_EPOCH'
JOB_ARTIFACTS_ENV = 'ARTIFACTS'
def build_name(started):
    """Return the unique(ish) string representing this build."""
    # TODO(fejta): right now jenkins sets the BUILD_ID and does this
    # in an environment variable. Consider migrating this to a
    # bootstrap.py flag
    if BUILD_ENV not in os.environ:
        # Automatically generate a build number if none is set
        suffix = '%x-%d' % (hash(node()), os.getpid())
        os.environ[BUILD_ENV] = time.strftime(
            '%Y%m%d-%H%M%S-' + suffix, time.gmtime(started))
    return os.environ[BUILD_ENV]
def setup_credentials(call, robot, upload):
    """Activate the service account unless robot is none.

    Args:
        call: callable used to run gcloud commands.
        robot: path to a service-account key file, or a false value.
        upload: gcs destination (truthy means results will be uploaded).
    Raises:
        ValueError: upload requested but no account is configured.
        IOError: the configured key file does not exist.
        Exception: gcloud activation failed after all retries.
    """
    # TODO(fejta): stop activating inside the image
    # TODO(fejta): allow use of existing gcloud auth
    if robot:
        os.environ[SERVICE_ACCOUNT_ENV] = robot
    if not os.getenv(SERVICE_ACCOUNT_ENV) and upload:
        logging.warning(
            'Cannot --upload=%s, no active gcloud account.', upload)
        raise ValueError('--upload requires --service-account')
    if not os.getenv(SERVICE_ACCOUNT_ENV) and not upload:
        logging.info('Will not upload results.')
        return
    if not os.path.isfile(os.environ[SERVICE_ACCOUNT_ENV]):
        raise IOError(
            'Cannot find service account credentials',
            os.environ[SERVICE_ACCOUNT_ENV],
            'Create service account and then create key at '
            'https://console.developers.google.com/iam-admin/serviceaccounts/project', # pylint: disable=line-too-long
        )
    # this sometimes fails spuriously due to DNS flakiness, so we retry it
    for _ in range(5):
        try:
            call([
                'gcloud',
                'auth',
                'activate-service-account',
                '--key-file=%s' % os.environ[SERVICE_ACCOUNT_ENV],
            ])
            break
        except subprocess.CalledProcessError:
            pass
            # constant (non-exponential) 1s backoff between attempts
            sleep_for = 1
            logging.info(
                'Retrying service account activation in %.2fs ...', sleep_for)
            time.sleep(sleep_for)
    else:
        # for/else: the loop never hit `break`, so every attempt failed.
        raise Exception(
            "Failed to activate service account, exhausted retries")
    try: # Old versions of gcloud may not support this value
        account = call(
            ['gcloud', 'config', 'get-value', 'account'], output=True).strip()
    except subprocess.CalledProcessError:
        account = 'unknown'
    logging.info('Will upload results to %s using %s', upload, account)
def setup_logging(path):
"""Initialize logging to screen and path."""
# See https://docs.python.org/2/library/logging.html#logrecord-attributes
# [IWEF]mmdd HH:MM:SS.mmm] msg
fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s' # pylint: disable=line-too-long
datefmt = '%m%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=fmt,
datefmt=datefmt,
)
build_log = logging.FileHandler(filename=path, mode='w')
build_log.setLevel(logging.INFO)
formatter = logging.Formatter(fmt, datefmt=datefmt)
build_log.setFormatter(formatter)
logging.getLogger('').addHandler(build_log)
return build_log
def get_artifacts_dir():
    # Prefer the podutils-style $ARTIFACTS dir; otherwise place artifacts
    # under the workspace (or the cwd when no workspace is configured).
    default = os.path.join(os.getenv(WORKSPACE_ENV, os.getcwd()), '_artifacts')
    return os.getenv(JOB_ARTIFACTS_ENV, default)
def setup_magic_environment(job, call):
    """Set magic environment variables scripts currently expect.

    Mutates os.environ in place: ssh key locations, WORKSPACE, HOME (on
    Jenkins), JOB_NAME, BOOTSTRAP_MIGRATION, CLOUDSDK_CONFIG, $ARTIFACTS
    and SOURCE_DATE_EPOCH. Also creates the artifacts directory.
    """
    home = os.environ[HOME_ENV]
    # TODO(fejta): jenkins sets these values. Consider migrating to using
    # a secret volume instead and passing the path to this volume
    # into bootstrap.py as a flag.
    os.environ.setdefault(
        GCE_KEY_ENV,
        os.path.join(home, '.ssh/google_compute_engine'),
    )
    os.environ.setdefault(
        'JENKINS_GCE_SSH_PUBLIC_KEY_FILE',
        os.path.join(home, '.ssh/google_compute_engine.pub'),
    )
    os.environ.setdefault(
        'AWS_SSH_PRIVATE_KEY_FILE',
        os.path.join(home, '.ssh/kube_aws_rsa'),
    )
    os.environ.setdefault(
        'AWS_SSH_PUBLIC_KEY_FILE',
        os.path.join(home, '.ssh/kube_aws_rsa.pub'),
    )

    cwd = os.getcwd()
    # TODO(fejta): jenkins sets WORKSPACE and pieces of our infra expect this
    # value. Consider doing something else in the future.
    # Furthermore, in the Jenkins and Prow environments, this is already set
    # to something reasonable, but using cwd will likely cause all sorts of
    # problems. Thus, only set this if we really need to.
    if WORKSPACE_ENV not in os.environ:
        os.environ[WORKSPACE_ENV] = cwd
    # By default, Jenkins sets HOME to JENKINS_HOME, which is shared by all
    # jobs. To avoid collisions, set it to the cwd instead, but only when
    # running on Jenkins.
    if os.getenv(HOME_ENV) and os.getenv(HOME_ENV) == os.getenv(JENKINS_HOME_ENV):
        os.environ[HOME_ENV] = cwd
    # TODO(fejta): jenkins sets JOB_ENV and pieces of our infra expect this
    # value. Consider making everything below here agnostic to the
    # job name.
    if JOB_ENV not in os.environ:
        os.environ[JOB_ENV] = job
    elif os.environ[JOB_ENV] != job:
        # A pre-set JOB_NAME that disagrees with the flag is overridden.
        logging.warning('%s=%s (overrides %s)', JOB_ENV,
                        job, os.environ[JOB_ENV])
        os.environ[JOB_ENV] = job
    # TODO(fejta): Magic value to tell our test code not do upload started.json
    # TODO(fejta): delete upload-to-gcs.sh and then this value.
    os.environ[BOOTSTRAP_ENV] = 'yes'
    # This helps prevent reuse of cloudsdk configuration. It also reduces the
    # risk that running a job on a workstation corrupts the user's config.
    os.environ[CLOUDSDK_ENV] = '%s/.config/gcloud' % cwd
    # Set $ARTIFACTS to help migrate to podutils
    os.environ[JOB_ARTIFACTS_ENV] = os.path.join(
        os.getenv(WORKSPACE_ENV, os.getcwd()), '_artifacts')
    # also make the artifacts dir if it doesn't exist yet
    if not os.path.isdir(get_artifacts_dir()):
        try:
            os.makedirs(get_artifacts_dir())
        except OSError as exc:
            # best-effort: a job can still run without an artifacts dir
            logging.info(
                'cannot create %s, continue : %s', get_artifacts_dir(), exc)
    # Try to set SOURCE_DATE_EPOCH based on the commit date of the tip of the
    # tree.
    # This improves cacheability of stamped binaries.
    head_commit_date = commit_date('git', 'HEAD', call)
    if head_commit_date:
        os.environ[SOURCE_DATE_EPOCH_ENV] = head_commit_date.strip()
def job_args(args):
    """Converts 'a ${FOO} $bar' into 'a wildly different string'."""
    # Expand shell-style environment references in each argument.
    expanded = []
    for arg in args:
        expanded.append(os.path.expandvars(arg))
    return expanded
def job_script(job, scenario, extra_job_args):
    """Return path to script for job."""
    with open(test_infra('jobs/config.json')) as fp:
        config = json.loads(fp.read())
    # Security jobs reuse the configuration of the corresponding PR job.
    if job.startswith('pull-security-kubernetes-'):
        job = job.replace('pull-security-kubernetes-', 'pull-kubernetes-', 1)
    config_json_args = []
    if job in config:
        job_config = config[job]
        scenario = scenario or job_config['scenario']
        config_json_args = job_config.get('args', [])
    elif not scenario:
        raise ValueError('cannot find scenario for job', job)
    cmd = test_infra('scenarios/%s.py' % scenario)
    return [cmd] + job_args(config_json_args + extra_job_args)
def gubernator_uri(paths):
    """Return a gubernator link for this build."""
    build_dir = os.path.dirname(paths.build_log)
    if not build_dir.startswith('gs:/'):
        # Not a gcs upload target; nothing to link to.
        return build_dir
    return build_dir.replace('gs:/', GUBERNATOR, 1)
@contextlib.contextmanager
def configure_ssh_key(ssh):
    """Creates a script for GIT_SSH that uses -i ssh if set.

    Args:
        ssh: path to an ssh private key, or a false value to do nothing.
    Yields:
        control to the caller with GIT_SSH pointing at a wrapper script;
        the previous GIT_SSH value is restored afterwards, even when the
        managed block raises (the original code leaked the override on
        exceptions).
    """
    if not ssh:  # Nothing to do
        yield
        return

    try:
        os.makedirs(os.path.join(os.environ[HOME_ENV], '.ssh'))
    except OSError as exc:
        logging.info('cannot create $HOME/.ssh, continue : %s', exc)
    except KeyError as exc:
        logging.info('$%s does not exist, continue : %s', HOME_ENV, exc)

    # Create a script for use with GIT_SSH, which defines the program git uses
    # during git fetch. In the future change this to GIT_SSH_COMMAND
    # https://superuser.com/questions/232373/how-to-tell-git-which-private-key-to-use
    with tempfile.NamedTemporaryFile(prefix='ssh', delete=False) as fp:
        fp.write(
            '#!/bin/sh\nssh -o StrictHostKeyChecking=no -i \'%s\' -F /dev/null "${@}"\n' % ssh)
    try:
        # 0o500 = owner read+execute (was the Python-2-only literal 0500).
        os.chmod(fp.name, 0o500)
        had = 'GIT_SSH' in os.environ
        old = os.getenv('GIT_SSH')
        os.environ['GIT_SSH'] = fp.name

        try:
            yield
        finally:
            # Restore the environment even if the body raised.
            del os.environ['GIT_SSH']
            if had:
                os.environ['GIT_SSH'] = old
    finally:
        os.unlink(fp.name)
def setup_root(call, root, repos, ssh, git_cache, clean):
    """Create root dir, checkout repo and cd into resulting dir.

    Side effects: creates `root` if missing, checks out every repo in
    `repos`, may remap the kubernetes-security repo inside `repos`, and
    leaves the process cwd inside the primary checkout.
    """
    if not os.path.exists(root):
        os.makedirs(root)
    root_dir = os.path.realpath(root)
    logging.info('Root: %s', root_dir)
    os.chdir(root_dir)
    logging.info('cd to %s', root_dir)
    # we want to checkout the correct repo for k-s/k but *everything*
    # under the sun assumes $GOPATH/src/k8s.io/kubernetes so... :(
    # after this method is called we've already computed the upload paths
    # etc. so we can just swap it out for the desired path on disk
    for repo, (branch, pull) in repos.items():
        os.chdir(root_dir)
        # for k-s/k these are different, for the rest they are the same
        # TODO(bentheelder,cjwagner,stevekuznetsov): in the integrated
        # prow checkout support remapping checkouts and kill this monstrosity
        repo_path = repo
        if repo == "github.com/kubernetes-security/kubernetes":
            repo_path = "k8s.io/kubernetes"
        logging.info(
            'Checkout: %s %s to %s',
            os.path.join(root_dir, repo),
            pull and pull or branch,  # the pull spec wins when present
            os.path.join(root_dir, repo_path))
        checkout(call, repo, repo_path, branch, pull, ssh, git_cache, clean)
    # switch out the main repo for the actual path on disk if we are k-s/k
    # from this point forward this is the path we want to use for everything
    if repos.main == "github.com/kubernetes-security/kubernetes":
        repos["k8s.io/kubernetes"], repos.main = repos[repos.main], "k8s.io/kubernetes"
    if len(repos) > 1: # cd back into the primary repo
        os.chdir(root_dir)
        os.chdir(repos.main)
class Repos(dict):
    """{"repo": (branch, pull)} dict with a .main attribute."""
    # The first repo ever inserted; treated as the primary checkout.
    main = ''

    def __setitem__(self, key, value):
        # First insertion wins the .main slot; later ones leave it alone.
        if not self:
            self.main = key
        return super(Repos, self).__setitem__(key, value)
def parse_repos(args):
    """Convert --repo=foo=this,123:abc,555:ddd into a Repos()."""
    repos = args.repo or {}
    if not repos and not args.bare:
        raise ValueError('--bare or --repo required')
    ret = Repos()
    if len(repos) != 1:
        # The deprecated flags only make sense with a single repo.
        if args.pull:
            raise ValueError(
                'Multi --repo does not support --pull, use --repo=R=branch,p1,p2')
        if args.branch:
            raise ValueError(
                'Multi --repo does not support --branch, use --repo=R=branch')
    elif args.branch or args.pull:
        # Single repo plus deprecated --branch/--pull flags.
        repo = repos[0]
        if '=' in repo or ':' in repo:
            raise ValueError(
                '--repo cannot contain = or : with --branch or --pull')
        ret[repo] = (args.branch, args.pull)
        return ret
    for repo in repos:
        mat = re.match(
            r'([^=]+)(=([^:,~^\s]+(:[0-9a-fA-F]+)?(:refs/changes/[0-9/]+)?(,|$))+)?$', repo)
        if not mat:
            raise ValueError('bad repo', repo, repos)
        this_repo = mat.group(1)
        if not mat.group(2):
            # Bare repo name: default to the master branch.
            ret[this_repo] = ('master', '')
        else:
            commits = mat.group(2)[1:].split(',')
            if len(commits) == 1:
                # Checking out a branch, possibly at a specific commit
                ret[this_repo] = (commits[0], '')
            else:
                # Checking out one or more PRs
                ret[this_repo] = ('', ','.join(commits))
    return ret
def bootstrap(args):
    """Clone repo at pull/branch into root and run job script.

    High-level flow: set up logging, compute upload paths, configure
    credentials/ssh/environment, run the job scenario, then upload
    results and re-raise any unexpected error. Exits non-zero when the
    job fails. NOTE(review): uses the Python-2-only three-expression
    raise below; this module predates py3.
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    job = args.job
    repos = parse_repos(args)
    upload = args.upload

    build_log_path = os.path.abspath('build-log.txt')
    build_log = setup_logging(build_log_path)
    started = time.time()
    if args.timeout:
        end = started + args.timeout * 60
    else:
        end = 0  # 0 means no deadline for _call
    call = lambda *a, **kw: _call(end, *a, **kw)
    gsutil = GSUtil(call)

    logging.warning('bootstrap.py is deprecated!\n'
                    'Please migrate your job to podutils!\n'
                    'https://github.com/kubernetes/test-infra/blob/master/prow/pod-utilities.md'
                    )

    if len(sys.argv) > 1:
        logging.info('Args: %s', ' '.join(pipes.quote(a)
                                          for a in sys.argv[1:]))
    logging.info('Bootstrap %s...', job)
    logging.info('Builder: %s', node())
    if IMAGE_NAME_ENV in os.environ:
        logging.info('Image: %s', os.environ[IMAGE_NAME_ENV])
    build = build_name(started)

    if upload:
        # TODO(bentheelder, cjwager, stevekuznetsov): support the workspace
        # repo not matching the upload repo in the shiny new init container
        pull_ref_repos = [repo for repo in repos if repos[repo][1]]
        if pull_ref_repos:
            # Temporarily point .main at the first PR repo so pr_paths()
            # computes the upload location for that repo.
            workspace_main, repos.main = repos.main, pull_ref_repos[0]
            paths = pr_paths(upload, repos, job, build)
            repos.main = workspace_main
        else:
            paths = ci_paths(upload, job, build)
        logging.info('Gubernator results at %s', gubernator_uri(paths))
        # TODO(fejta): Replace env var below with a flag eventually.
        os.environ[GCS_ARTIFACTS_ENV] = paths.artifacts

    version = 'unknown'
    exc_type = None

    try:
        with configure_ssh_key(args.ssh):
            setup_credentials(call, args.service_account, upload)
            setup_root(call, args.root, repos, args.ssh,
                       args.git_cache, args.clean)
            logging.info('Configure environment...')
            setup_magic_environment(job, call)
            # Re-run after setup_magic_environment in case it changed HOME
            # or other variables the credentials depend on.
            setup_credentials(call, args.service_account, upload)
            version = find_version(call) if repos else ''
            logging.info('Start %s at %s...', build, version)
            if upload:
                start(gsutil, paths, started, node(), version, repos)
            success = False
            try:
                call(job_script(job, args.scenario, args.extra_job_args))
                logging.info('PASS: %s', job)
                success = True
            except subprocess.CalledProcessError:
                logging.error('FAIL: %s', job)
    except Exception: # pylint: disable=broad-except
        # Remember the failure so it can be re-raised after uploading logs.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.exception('unexpected error')
        success = False

    # jobs can change service account, always set it back before we upload logs
    setup_credentials(call, args.service_account, upload)
    if upload:
        logging.info('Upload result and artifacts...')
        logging.info('Gubernator results at %s', gubernator_uri(paths))
        try:
            finish(
                gsutil, paths, success, get_artifacts_dir(),
                build, version, repos, call
            )
        except subprocess.CalledProcessError: # Still try to upload build log
            success = False
    logging.getLogger('').removeHandler(build_log)
    build_log.close()
    if upload:
        gsutil.copy_file(paths.build_log, build_log_path)
    if exc_type:
        # py2 three-expression raise: re-raise the original exception with
        # its original traceback now that logs have been uploaded.
        raise exc_type, exc_value, exc_traceback # pylint: disable=raising-bad-type
    if not success:
        # TODO(fejta/spxtr): we should distinguish infra and non-infra problems
        # by exit code and automatically retrigger after an infra-problem.
        sys.exit(1)
def parse_args(arguments=None):
    """Parse arguments or sys.argv[1:]."""
    if arguments is None:
        arguments = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', default='.', help='Root dir to work with')
    parser.add_argument(
        '--timeout', type=float, default=0, help='Timeout in minutes if set')
    parser.add_argument(
        '--repo',
        action='append',
        help='Fetch the specified repositories, with the first one considered primary')
    parser.add_argument(
        '--bare',
        action='store_true',
        help='Do not check out a repository')
    parser.add_argument('--job', required=True, help='Name of the job to run')
    parser.add_argument(
        '--upload',
        help='Upload results here if set, requires --service-account')
    parser.add_argument(
        '--service-account',
        help='Activate and use path/to/service-account.json if set.')
    parser.add_argument(
        '--ssh',
        help='Use the ssh key to fetch the repository instead of https if set.')
    parser.add_argument(
        '--git-cache',
        help='Location of the git cache.')
    parser.add_argument(
        '--clean',
        action='store_true',
        help='Clean the git repo before running tests.')
    # TODO(krzyzacy): later we should merge prow+config.json
    # and utilize this flag
    parser.add_argument(
        '--scenario',
        help='Scenario to use, if not specified in config.json')

    # split out args after `--` as job arguments
    extra_job_args = []
    if '--' in arguments:
        split_at = arguments.index('--')
        arguments, extra_job_args = arguments[:split_at], arguments[split_at + 1:]

    args = parser.parse_args(arguments)
    args.extra_job_args = extra_job_args
    # --pull is deprecated, use --repo=k8s.io/foo=master:abcd,12:ef12,45:ff65
    args.pull = None
    # --branch is deprecated, use --repo=k8s.io/foo=master
    args.branch = None
    # Exactly one of --repo / --bare must be supplied.
    if bool(args.repo) == bool(args.bare):
        raise argparse.ArgumentTypeError(
            'Expected --repo xor --bare:', args.repo, args.bare)
    return args
if __name__ == '__main__':
    # CLI entry point: parse flags and run the requested job.
    ARGS = parse_args()
    bootstrap(ARGS)
| apache-2.0 |
jspargo/AneMo | thermo/flask/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py | 1323 | 1775 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Trie backed by a plain dict plus a sorted key list.

    Lookups delegate to the dict; prefix queries binary-search the sorted
    key list and cache the last (prefix -> slice) answer so a sequence of
    queries with extending prefixes narrows its search range.
    """
    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")

        self._data = data
        self._keys = sorted(data.keys())
        self._cachestr = ""
        # (lo, hi): bounds in self._keys of the keys matching _cachestr.
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys starting with prefix (all keys if falsy)."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)

        keys = set()
        if start == len(self._keys):
            return keys

        # Bug fix: bound i before indexing. Without the i < len() check
        # a prefix matching every remaining key walked off the end of
        # self._keys and raised IndexError.
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1

        self._cachestr = prefix
        self._cachepoints = (start, i)

        return keys

    def has_keys_with_prefix(self, prefix):
        if prefix in self._data:
            return True

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)

        if i == len(self._keys):
            return False

        return self._keys[i].startswith(prefix)
| gpl-2.0 |
hsum/sqlalchemy | test/sql/test_unicode.py | 28 | 6087 | # coding: utf-8
"""verrrrry basic unicode column name testing"""
from sqlalchemy import *
from sqlalchemy.testing import fixtures, engines, eq_
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.util import u, ue
class UnicodeSchemaTest(fixtures.TestBase):
    """Exercise DDL/DML/reflection for non-ASCII table and column names.

    u()/ue() are sqlalchemy.util helpers producing py2/py3-portable
    unicode literals; ue() additionally interprets \\uXXXX escapes.
    """
    __requires__ = ('unicode_ddl',)
    __backend__ = True

    @classmethod
    def setup_class(cls):
        # Module-level globals so every test method shares the same tables.
        global metadata, t1, t2, t3
        metadata = MetaData(testing.db)
        t1 = Table(u('unitable1'), metadata,
                   Column(u('méil'), Integer, primary_key=True),
                   Column(ue('\u6e2c\u8a66'), Integer),
                   test_needs_fk=True,
                   )
        t2 = Table(
            u('Unitéble2'),
            metadata,
            Column(
                u('méil'),
                Integer,
                primary_key=True,
                key="a"),
            Column(
                ue('\u6e2c\u8a66'),
                Integer,
                ForeignKey(
                    u('unitable1.méil')),
                key="b"),
            test_needs_fk=True,
        )

        # Few DBs support Unicode foreign keys
        if testing.against('sqlite'):
            t3 = Table(ue('\u6e2c\u8a66'), metadata,
                       Column(ue('\u6e2c\u8a66_id'), Integer, primary_key=True,
                              autoincrement=False),
                       Column(ue('unitable1_\u6e2c\u8a66'), Integer,
                              ForeignKey(ue('unitable1.\u6e2c\u8a66'))
                              ),
                       Column(u('Unitéble2_b'), Integer,
                              ForeignKey(u('Unitéble2.b'))
                              ),
                       Column(ue('\u6e2c\u8a66_self'), Integer,
                              ForeignKey(ue('\u6e2c\u8a66.\u6e2c\u8a66_id'))
                              ),
                       test_needs_fk=True,
                       )
        else:
            # Same shape without the FK constraints for other backends.
            t3 = Table(ue('\u6e2c\u8a66'), metadata,
                       Column(ue('\u6e2c\u8a66_id'), Integer, primary_key=True,
                              autoincrement=False),
                       Column(ue('unitable1_\u6e2c\u8a66'), Integer),
                       Column(u('Unitéble2_b'), Integer),
                       Column(ue('\u6e2c\u8a66_self'), Integer),
                       test_needs_fk=True,
                       )
        metadata.create_all()

    @engines.close_first
    def teardown(self):
        # Empty tables between tests; children first for FK ordering.
        if metadata.tables:
            t3.delete().execute()
            t2.delete().execute()
            t1.delete().execute()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_insert(self):
        # Round-trip rows keyed by unicode column names.
        t1.insert().execute({u('méil'): 1, ue('\u6e2c\u8a66'): 5})
        t2.insert().execute({u('a'): 1, u('b'): 1})
        t3.insert().execute({ue('\u6e2c\u8a66_id'): 1,
                             ue('unitable1_\u6e2c\u8a66'): 5,
                             u('Unitéble2_b'): 1,
                             ue('\u6e2c\u8a66_self'): 1})

        assert t1.select().execute().fetchall() == [(1, 5)]
        assert t2.select().execute().fetchall() == [(1, 1)]
        assert t3.select().execute().fetchall() == [(1, 5, 1, 1)]

    def test_col_targeting(self):
        # Access row values through Column objects with unicode names/keys.
        t1.insert().execute({u('méil'): 1, ue('\u6e2c\u8a66'): 5})
        t2.insert().execute({u('a'): 1, u('b'): 1})
        t3.insert().execute({ue('\u6e2c\u8a66_id'): 1,
                             ue('unitable1_\u6e2c\u8a66'): 5,
                             u('Unitéble2_b'): 1,
                             ue('\u6e2c\u8a66_self'): 1})

        row = t1.select().execute().first()
        eq_(row[t1.c[u('méil')]], 1)
        eq_(row[t1.c[ue('\u6e2c\u8a66')]], 5)

        row = t2.select().execute().first()
        eq_(row[t2.c[u('a')]], 1)
        eq_(row[t2.c[u('b')]], 1)

        row = t3.select().execute().first()
        eq_(row[t3.c[ue('\u6e2c\u8a66_id')]], 1)
        eq_(row[t3.c[ue('unitable1_\u6e2c\u8a66')]], 5)
        eq_(row[t3.c[u('Unitéble2_b')]], 1)
        eq_(row[t3.c[ue('\u6e2c\u8a66_self')]], 1)

    def test_reflect(self):
        # Reflect the tables back from the DB and verify the unicode
        # names survive the round trip (insert via reflected tables too).
        t1.insert().execute({u('méil'): 2, ue('\u6e2c\u8a66'): 7})
        t2.insert().execute({u('a'): 2, u('b'): 2})
        t3.insert().execute({ue('\u6e2c\u8a66_id'): 2,
                             ue('unitable1_\u6e2c\u8a66'): 7,
                             u('Unitéble2_b'): 2,
                             ue('\u6e2c\u8a66_self'): 2})

        meta = MetaData(testing.db)
        tt1 = Table(t1.name, meta, autoload=True)
        tt2 = Table(t2.name, meta, autoload=True)
        tt3 = Table(t3.name, meta, autoload=True)
        tt1.insert().execute({u('méil'): 1, ue('\u6e2c\u8a66'): 5})
        tt2.insert().execute({u('méil'): 1, ue('\u6e2c\u8a66'): 1})
        tt3.insert().execute({ue('\u6e2c\u8a66_id'): 1,
                              ue('unitable1_\u6e2c\u8a66'): 5,
                              u('Unitéble2_b'): 1,
                              ue('\u6e2c\u8a66_self'): 1})

        self.assert_(
            tt1.select(
                order_by=desc(
                    u('méil'))).execute().fetchall() == [
                (2, 7), (1, 5)])
        self.assert_(
            tt2.select(
                order_by=desc(
                    u('méil'))).execute().fetchall() == [
                (2, 2), (1, 1)])
        self.assert_(tt3.select(order_by=desc(ue('\u6e2c\u8a66_id'))).
                     execute().fetchall() ==
                     [(2, 7, 2, 2), (1, 5, 1, 1)])

    def test_repr(self):
        m = MetaData()
        t = Table(
            ue('\u6e2c\u8a66'),
            m,
            Column(
                ue('\u6e2c\u8a66_id'),
                Integer))

        # I hardly understand what's going on with the backslashes in
        # this one on py2k vs. py3k
        eq_(repr(t),
            ("Table('\\u6e2c\\u8a66', MetaData(bind=None), "
             "Column('\\u6e2c\\u8a66_id', Integer(), table=<\u6e2c\u8a66>), "
             "schema=None)"))
| mit |
ActiveState/code | recipes/Python/170995_Manipulating_infinite_lists_implemented/recipe-170995.py | 1 | 3490 | """Collection of useful functions which work on infinite lists.
The infinite lists are actually the generator objects. Note that
the functions will have side effects on the passed-in gLists.
"""
from __future__ import generators
def gInfinite(obj):
    """Return infinite list of repeated objects obj"""
    while True:
        yield obj

# Canonical infinite stream of None (its type doubles as the
# "is this a generator?" reference used elsewhere in the module).
gNone = gInfinite(None)
def gJoin(gl1, gl2):
    """Return gl1+gl2, i.e [gl1[0],...,gl1[n],gl2[0],...]
    Apparently only useful when gl1 is finite.
    """
    # Drain each input in turn; gl2 is never touched until gl1 ends.
    for source in (gl1, gl2):
        for item in source:
            yield item
def gCon(x, xs):
    """Return [x, xs[0], xs[1], ...]

    Fixed for modern Python: the original called the Python-2-only
    ``xs.next()`` in a ``while 1`` loop and relied on StopIteration
    escaping the generator, which PEP 479 (Python 3.7+) turns into a
    RuntimeError. A plain for-loop terminates cleanly on both versions.
    """
    yield x
    for item in iter(xs):  # iter() so plain lists keep working
        yield item
def gRange(start=0, step=1, stop=None):
    """Generalized version of range() - could be infinite
    Note the difference in the order of arguments from those
    of range().
    """
    if stop is not None:
        # Finite case: defer entirely to range().
        for value in range(start, stop, step):
            yield value
        return
    # Infinite case: count from start forever in increments of step.
    value = int(start)
    step = int(step)
    while True:
        yield value
        value += step
def gMap(f, *gLists):
    """Generalized version of map() - work on infinite list
    Work differently from map(), stops when the end of the shortest
    gList is reached.

    Fixed for modern Python: the original called the Python-2-only
    ``gl.next()`` and let StopIteration leak out of the generator,
    which PEP 479 (Python 3.7+) converts into a RuntimeError.
    """
    if f is None:
        f = lambda *x: x
    iterators = [iter(gl) for gl in gLists]  # also works for ordinary lists
    while 1:
        values = []
        for it in iterators:
            try:
                values.append(next(it))
            except StopIteration:
                return  # shortest input exhausted: stop cleanly
        yield f(*values)
def gZip(*gLists):
    """Generalized version of zip() - work on infinite list"""
    # Delegates to gMap with f=None, which yields argument tuples and
    # stops at the shortest input -- exactly zip() semantics.
    for x in gMap(None, *gLists):
        yield x
def gFilter(f, gList):
    """Generalized version of filter() - work on infinite list"""
    predicate = (lambda x: x) if f is None else f
    # WARNING: may fall into forever loop without yielding anything
    # if predicate(item) is always false from a certain item onwards
    for item in gList:
        if predicate(item):
            yield item
def gCompre(f, gList, cond=lambda *x: 1):
    """List Comprehension
    [f(*x) for x in gList if cond(*x)]
    """
    # Each element of gList is unpacked into both cond and f.
    for args in gList:
        # WARNING: may fall into forever loop without yielding anything
        # if cond(*args) is always false from a certain point onwards
        if cond(*args):
            yield f(*args)
def pList(gList, limit=20):
    """Return partial ordinary list of gList."""
    # Non-generators pass straight through; generators are truncated to
    # `limit` items, recursing so nested generators are expanded too.
    if type(gList) is not type(gNone):
        return gList
    return [pList(pair[0]) for pair in zip(gList, range(limit))]
if __name__=='__main__':
    # Demo / smoke test of the generator toolkit.
    # NOTE(review): Python 2 only -- print statements, gList.next(),
    # and sys.maxint below.
    print pList(gMap(lambda x,y,z: x+y+z, gRange(1), gRange(2,2), gRange(3,3)))
    # -> [1+2+3, 2+4+6, 3+6+9, ...]
    def f(x,y):
        return '%s%i' % (x,y)
    def g(x,y):
        return y%3==0
    print pList(gCompre(f, gZip(gInfinite('A'), gRange(2)), g))
    # or pList(gCompre(lambda x,y: '%s%i' % (x,y), gZip(gInfinite('A'), gRange(2)), lambda x,y: y%3==0))
    # -> ['A3', 'A6', 'A9', ...]
    def sieve(gList):
        """Sieve of Eratosthene"""
        # Recursively stack a filter per prime; depth grows with each
        # prime produced, hence the recursion-limit bump below.
        x = gList.next()
        xs = sieve(gFilter(lambda y: y % x != 0, gList))
        for y in gCon(x, xs):
            yield y
    import sys
    sys.setrecursionlimit(sys.maxint) # needed for bigger lists of primes
    primes = sieve(gRange(2)) # infinite list of primes
    print pList(primes, 100) # print the first 100 primes
    print pList(primes, 500) # print subsequent 500 primes
    # gList of gLists
    print pList(gMap(gRange, gRange()))
jiriprochazka/lnst | obsolete/netconfig.py | 1 | 6220 | #! /usr/bin/env python
"""
Netconfig tool
Copyright 2011 Red Hat, Inc.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
jpirko@redhat.com (Jiri Pirko)
"""
import getopt
import sys
import logging
import re
import os
from pprint import pprint
from NetConfig.NetConfig import NetConfig
from NetConfig.NetConfigDevice import NetConfigDeviceAllCleanup
from NetConfig.NetConfigDevNames import NetConfigDevNames
from NetTest.NetTestParse import NetConfigParse
from NetTest.NetTestParse import NetMachineConfigParse
from Common.XmlProcessing import XmlDomTreeInit
from Common.Logs import Logs
def usage():
    """
    Print usage of this app
    """
    # NOTE(review): Python 2 print statements; terminates the process
    # via sys.exit() after printing.
    print "Usage: netconfig.py [OPTION...] ACTION"
    print ""
    print "ACTION = [up | down | dump | cleanup | test]"
    print ""
    print "  -d, --debug                             emit debugging messages"
    print "  -h, --help                              print this message"
    print "  -c, --config=FILE                       use this net configuration file"
    print "  -m, --machine-config=FILE               use this machine configuration file"
    sys.exit()
def prepare_machine_config(machine_file):
    """Parse a machine-config xml file into the machine data dict."""
    dom = XmlDomTreeInit().parse_file(machine_file)
    machine_dom = dom.getElementsByTagName("netmachineconfig")[0]
    data = {"info": {}, "netdevices": {}, "netconfig": {}}
    # Parser fills `data` in place; events are disabled for offline use.
    parser = NetMachineConfigParse()
    parser.disable_events()
    parser.set_recipe(data)
    parser.set_machine(0, data)
    parser.parse(machine_dom)
    return data
def prepare_netconfig(machine_file, config_file):
    """Build a NetConfig from machine + network configuration xml files."""
    data = prepare_machine_config(machine_file)
    dom = XmlDomTreeInit().parse_file(config_file)
    config_dom = dom.getElementsByTagName("netconfig")[0]
    # Parser merges the network config into the machine data dict.
    parser = NetConfigParse()
    parser.disable_events()
    parser.set_recipe(data)
    parser.set_machine(0, data)
    parser.parse(config_dom)
    netconfig = NetConfig()
    for key, entry in data["netconfig"].iteritems():
        netconfig.add_interface_config(key, entry)
    return netconfig
def netmachineconfig_to_xml(machine_data):
    """Serialize parsed machine-config data back to xml text.

    Only keys present in the dicts are emitted; each attribute is
    rendered as 'name="value" ' (trailing space) to keep the original
    formatting. Uses .items() instead of the Python-2-only .iteritems().
    """
    def attr(mapping, key, name=None):
        """Render one optional attribute ('name="value" ') or ''."""
        if key not in mapping:
            return ""
        return "%s=\"%s\" " % (name or key, mapping[key])

    info = machine_data["info"]
    info_tag = "    <info %s%s%s/>\n" % (attr(info, "hostname"),
                                         attr(info, "rootpass"),
                                         attr(info, "rpcport"))

    devices = ""
    for phys_id, netdev in machine_data["netdevices"].items():
        pid = "phys_id=\"%s\" " % phys_id
        device_tag = "    <netdevice %s%s%s%s/>\n" % (pid,
                                                      attr(netdev, "type"),
                                                      attr(netdev, "name"),
                                                      attr(netdev, "hwaddr"))
        devices += device_tag

    return "<netmachineconfig>\n" + info_tag + devices + "</netmachineconfig>"
def main():
"""
Main function
"""
try:
opts, args = getopt.getopt(
sys.argv[1:],
"dhc:m:a:",
["debug", "help", "config=", "machine-config=", "action="]
)
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit()
debug = False
config_path = None
machine_config_path = None
for opt, arg in opts:
if opt in ("-d", "--debug"):
debug = True
elif opt in ("-h", "--help"):
usage()
elif opt in ("-c", "--config"):
config_path = arg
elif opt in ("-m", "--machine-config"):
machine_config_path = arg
Logs(debug)
logging.info("Started")
if not args:
logging.error("No action command passed")
usage();
action = args[0]
if action == "cleanup":
NetConfigDeviceAllCleanup()
return
if not machine_config_path:
logging.error("No machine config xml file passed")
usage();
machine_config_path = os.path.expanduser(machine_config_path)
if action == "refresh":
logging.info("Refreshing machine config")
machine_data = prepare_machine_config(machine_config_path)
dev_names = NetConfigDevNames()
for dev_id, netdev in machine_data["netdevices"].iteritems():
if "name" in netdev:
del netdev["name"]
dev_names.assign_name_by_scan(dev_id, netdev)
output = netmachineconfig_to_xml(machine_data)
handle = open(machine_config_path, "w")
handle.write(output)
handle.close()
return
if not config_path:
logging.error("No net config file/dir passed")
usage();
config_path = os.path.expanduser(config_path)
if action == "test":
'''
Go through specified directory and use all xmls and configs
'''
for root, dirs, files in os.walk(config_path):
for file_name in files:
config_file = os.path.join(config_path, file_name)
if not re.match(r'^.*\.xml$', config_file):
continue
logging.info("Processing config file \"%s\"", config_file)
net_config = prepare_netconfig(machine_config_path,
config_file)
net_config.configure_all()
net_config.deconfigure_all()
return
net_config = prepare_netconfig(machine_config_path, config_path)
if action == "up":
net_config.configure_all()
elif action == "down":
net_config.deconfigure_all()
elif action == "dump":
pprint(net_config.dump_config())
else:
logging.error("unknown action \"%s\"" % action)
if __name__ == "__main__":
main()
| gpl-2.0 |
napalm-automation/napalm | napalm/base/netmiko_helpers.py | 1 | 1807 | # The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import inspect
from netmiko import BaseConnection
def netmiko_args(optional_args):
    """Check for Netmiko arguments that were passed in as NAPALM optional arguments.

    Return a dictionary of these optional args that will be passed into the Netmiko
    ConnectHandler call.

    Raises:
        ValueError: if BaseConnection.__init__'s first argument is not 'self'
            (a sanity check that introspection worked).
    """
    fields = inspect.getfullargspec(BaseConnection.__init__)
    args = fields.args
    defaults = fields.defaults or ()
    check_self = args.pop(0)
    if check_self != "self":
        raise ValueError("Error processing Netmiko arguments")
    # Defaults align with the *tail* of the argument list, so zip the
    # reversed sequences: dict(zip(args, defaults)) would silently mis-map
    # names to values if Netmiko ever adds an argument without a default.
    netmiko_argument_map = dict(zip(reversed(args), reversed(defaults)))
    # Netmiko arguments that are integrated into NAPALM already
    netmiko_filter = ["ip", "host", "username", "password", "device_type", "timeout"]
    # Filter out all of the arguments that are integrated into NAPALM;
    # pop with a default so a Netmiko API change cannot raise KeyError here.
    for k in netmiko_filter:
        netmiko_argument_map.pop(k, None)
    # Keep only those remaining Netmiko arguments that were actually passed
    # in as NAPALM optional_args.
    return {k: optional_args[k] for k in netmiko_argument_map if k in optional_args}
| apache-2.0 |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/migrate/test_migrate_vm_console_access.py | 1 | 1292 | '''
New Integration test for testing console access after vm migration between hosts.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.account_operations as acc_ops
vm = None
test_stub = test_lib.lib_get_specific_stub()
def test():
    """Migrate a VR-backed VM to a random host, then verify that the VM's
    console (host ip:port reported by the management node) is reachable
    after the migration."""
    global vm
    vm = test_stub.create_vr_vm('migrate_vm', 'imageName_s', 'l3VlanNetwork2')
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    session_uuid = acc_ops.login_as_admin()
    # Console address is queried *after* migration so it reflects the new host.
    console = test_lib.lib_get_vm_console_address(vm.get_vm().uuid, session_uuid)
    if test_lib.lib_network_check(console.hostIp, console.port):
        test_util.test_logger('[vm:] %s console on %s:%s is connectable' % (vm.get_vm().uuid, console.hostIp, console.port))
    else:
        test_util.test_fail('[vm:] %s console on %s:%s is not connectable' % (vm.get_vm().uuid, console.hostIp, console.port))
    # NOTE(review): if test_fail() aborts here, logout/destroy are skipped;
    # the VM itself is reclaimed by error_cleanup() -- confirm session leak
    # is acceptable for the harness.
    acc_ops.logout(session_uuid)
    vm.destroy()
    test_util.test_pass('Migrate VM Console Access Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup: destroy the test VM created by test(), if any."""
    global vm
    if vm:
        try:
            vm.destroy()
        except Exception:
            # A bare 'except:' would also swallow KeyboardInterrupt and
            # SystemExit; catching Exception keeps cleanup best-effort
            # without masking interpreter shutdown.
            pass
| apache-2.0 |
fujunwei/chromium-crosswalk | tools/metrics/common/diff_util.py | 103 | 1626 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for prompting user if changes automatically applied to some
user-managed files are correct.
"""
import logging
import os
import webbrowser
from difflib import HtmlDiff
from tempfile import NamedTemporaryFile
def PromptUserToAcceptDiff(old_text, new_text, prompt):
  """Displays a difference in two strings (old and new file contents) to the
  user and asks whether the new version is acceptable.

  The diff is rendered as an HTML side-by-side view in a temporary file and
  opened in the default browser; the answer is read from stdin.

  Args:
    old_text: A string containing old file contents.
    new_text: A string containing new file contents.
    prompt: Text that should be displayed to the user, asking whether the new
        file contents should be accepted.

  Returns:
    True if user accepted the changes or there were no changes, False
    otherwise.
  """
  logging.info('Computing diff...')
  if old_text == new_text:
    logging.info('No changes detected')
    return True
  html_diff = HtmlDiff(wrapcolumn=80).make_file(
      old_text.splitlines(), new_text.splitlines(), fromdesc='Original',
      todesc='Updated', context=True, numlines=5)
  # delete=False: the file must outlive this handle so the browser can read
  # it; it is removed explicitly in the finally block.
  temp = NamedTemporaryFile(suffix='.html', delete=False)
  try:
    temp.write(html_diff)
    temp.close()  # Close the file so the browser process can access it.
    webbrowser.open('file://' + temp.name)
    print prompt
    # Empty input (just Enter) counts as acceptance, see the return below.
    response = raw_input('(Y/n): ').strip().lower()
  finally:
    temp.close()  # May be called on already closed file.
    os.remove(temp.name)
  return response == 'y' or response == ''
| bsd-3-clause |
KousikaGanesh/purchaseandInventory | openerp/addons/document_webdav/__init__.py | 58 | 1119 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import webdav
import webdav_server
import document_webdav
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xray/xray | xarray/core/options.py | 1 | 5201 | import warnings
DISPLAY_WIDTH = "display_width"
ARITHMETIC_JOIN = "arithmetic_join"
ENABLE_CFTIMEINDEX = "enable_cftimeindex"
FILE_CACHE_MAXSIZE = "file_cache_maxsize"
WARN_FOR_UNCLOSED_FILES = "warn_for_unclosed_files"
CMAP_SEQUENTIAL = "cmap_sequential"
CMAP_DIVERGENT = "cmap_divergent"
KEEP_ATTRS = "keep_attrs"
DISPLAY_STYLE = "display_style"
OPTIONS = {
DISPLAY_WIDTH: 80,
ARITHMETIC_JOIN: "inner",
ENABLE_CFTIMEINDEX: True,
FILE_CACHE_MAXSIZE: 128,
WARN_FOR_UNCLOSED_FILES: False,
CMAP_SEQUENTIAL: "viridis",
CMAP_DIVERGENT: "RdBu_r",
KEEP_ATTRS: "default",
DISPLAY_STYLE: "html",
}
_JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"])
_DISPLAY_OPTIONS = frozenset(["text", "html"])
def _positive_integer(value):
return isinstance(value, int) and value > 0
_VALIDATORS = {
DISPLAY_WIDTH: _positive_integer,
ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,
ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),
FILE_CACHE_MAXSIZE: _positive_integer,
WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),
KEEP_ATTRS: lambda choice: choice in [True, False, "default"],
DISPLAY_STYLE: _DISPLAY_OPTIONS.__contains__,
}
def _set_file_cache_maxsize(value):
    """Resize xarray's global least-recently-used cache of open files."""
    # Imported at call time so that merely importing this core module does
    # not load the backends package.
    from ..backends.file_manager import FILE_CACHE
    FILE_CACHE.maxsize = value
def _warn_on_setting_enable_cftimeindex(enable_cftimeindex):
warnings.warn(
"The enable_cftimeindex option is now a no-op "
"and will be removed in a future version of xarray.",
FutureWarning,
)
_SETTERS = {
FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,
ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,
}
def _get_keep_attrs(default):
    """Resolve the effective keep_attrs flag.

    Returns *default* when the global option is "default", otherwise the
    globally configured boolean.  Raises ValueError on a corrupted global.
    """
    configured = OPTIONS["keep_attrs"]
    if configured == "default":
        return default
    if configured in (True, False):
        return configured
    raise ValueError(
        "The global option keep_attrs must be one of"
        " True, False or 'default'."
    )
class set_options:
    """Set xarray options, either globally or within a ``with`` block.

    Recognized options:

    - ``display_width``: maximum line width used by ``repr``. Default: 80.
    - ``arithmetic_join``: DataArray/Dataset alignment for binary
      operations ('inner', 'outer', 'left', 'right', 'exact').
      Default: 'inner'.
    - ``file_cache_maxsize``: size of the global least-recently-used cache
      of open files; keep it below the per-process file descriptor limit
      (e.g. ``ulimit -n`` on Linux). Default: 128.
    - ``warn_for_unclosed_files``: warn when unclosed files are
      deallocated (debugging aid). Default: False.
    - ``cmap_sequential`` / ``cmap_divergent``: default colormaps for
      nondivergent / divergent data plots; a matplotlib built-in colormap
      name or a Colormap object. Defaults: 'viridis' / 'RdBu_r'.
    - ``keep_attrs``: whether operations keep attributes -- ``True``,
      ``False``, or ``'default'`` (keep only in unambiguous cases).
      Default: 'default'.
    - ``display_style``: display style used in Jupyter, 'text' or 'html'.

    Used as a context manager, the previous values are restored on exit::

        with xr.set_options(display_width=40):
            ...

    Called directly, the options are changed globally::

        xr.set_options(display_width=80)
    """

    def __init__(self, **kwargs):
        self.old = {}
        for name, value in kwargs.items():
            if name not in OPTIONS:
                raise ValueError(
                    "argument name %r is not in the set of valid options %r"
                    % (name, set(OPTIONS))
                )
            validator = _VALIDATORS.get(name)
            if validator is not None and not validator(value):
                if name == ARITHMETIC_JOIN:
                    expected = f"Expected one of {_JOIN_OPTIONS!r}"
                elif name == DISPLAY_STYLE:
                    expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
                else:
                    expected = ""
                raise ValueError(
                    f"option {name!r} given an invalid value: {value!r}. " + expected
                )
            # remember the previous value so __exit__ can restore it
            self.old[name] = OPTIONS[name]
        self._apply_update(kwargs)

    def _apply_update(self, options_dict):
        """Run option-specific side effects, then update the global dict."""
        for name, value in options_dict.items():
            setter = _SETTERS.get(name)
            if setter is not None:
                setter(value)
        OPTIONS.update(options_dict)

    def __enter__(self):
        return

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._apply_update(self.old)
| apache-2.0 |
2uller/LotF | App/Lib/idlelib/OutputWindow.py | 2 | 4725 | from Tkinter import *
from idlelib.EditorWindow import EditorWindow
import re
import tkMessageBox
from idlelib import IOBinding
class OutputWindow(EditorWindow):
    """An editor window that can serve as an output file.

    Also the future base class for the Python shell window.
    This class has no input facilities.

    Implements the file protocol (write/writelines/flush) so it can stand
    in for sys.stdout/sys.stderr.
    """
    def __init__(self, *args):
        EditorWindow.__init__(self, *args)
        # The "Go to file/line" right-click menu entry fires this virtual event.
        self.text.bind("<<goto-file-line>>", self.goto_file_line)
    # Customize EditorWindow
    def ispythonsource(self, filename):
        # No colorization needed
        return 0
    def short_title(self):
        return "Output"
    def maybesave(self):
        # Override base class method -- don't ask any questions
        if self.get_saved():
            return "yes"
        else:
            return "no"
    # Act as output file
    def write(self, s, tags=(), mark="insert"):
        """Insert *s* at *mark* with the given tags and scroll it into view."""
        # Tk assumes that byte strings are Latin-1;
        # we assume that they are in the locale's encoding
        if isinstance(s, str):
            try:
                s = unicode(s, IOBinding.encoding)
            except UnicodeError:
                # some other encoding; let Tcl deal with it
                pass
        self.text.insert(mark, s, tags)
        self.text.see(mark)
        self.text.update()
    def writelines(self, lines):
        for line in lines:
            self.write(line)
    def flush(self):
        # File-protocol no-op: writes go straight to the text widget.
        pass
    # Our own right-button menu
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Go to file/line", "<<goto-file-line>>", None),
    ]
    file_line_pats = [
        # order of patterns matters
        r'file "([^"]*)", line (\d+)',
        r'([^\s]+)\((\d+)\)',
        r'^(\s*\S.*?):\s*(\d+):',  # Win filename, maybe starting with spaces
        r'([^\s]+):\s*(\d+):',     # filename or path, ltrim
        r'^\s*(\S.*?):\s*(\d+):',  # Win abs path with embedded spaces, ltrim
    ]
    # Compiled lazily per instance in goto_file_line(); the assignment there
    # shadows this class-level None.
    file_line_progs = None
    def goto_file_line(self, event=None):
        """Parse a file/line reference from the clicked line (or the one
        above it) and open that file at that line in an editor window."""
        if self.file_line_progs is None:
            l = []
            for pat in self.file_line_pats:
                l.append(re.compile(pat, re.IGNORECASE))
            self.file_line_progs = l
        # x, y = self.event.x, self.event.y
        # self.text.mark_set("insert", "@%d,%d" % (x, y))
        line = self.text.get("insert linestart", "insert lineend")
        result = self._file_line_helper(line)
        if not result:
            # Try the previous line. This is handy e.g. in tracebacks,
            # where you tend to right-click on the displayed source line
            line = self.text.get("insert -1line linestart",
                                 "insert -1line lineend")
            result = self._file_line_helper(line)
            if not result:
                tkMessageBox.showerror(
                    "No special line",
                    "The line you point at doesn't look like "
                    "a valid file name followed by a line number.",
                    master=self.text)
                return
        filename, lineno = result
        edit = self.flist.open(filename)
        edit.gotoline(lineno)
    def _file_line_helper(self, line):
        """Return (filename, lineno) if *line* matches a pattern AND names
        an openable file; otherwise None."""
        for prog in self.file_line_progs:
            match = prog.search(line)
            if match:
                filename, lineno = match.group(1, 2)
                try:
                    # open/close just to verify the file exists and is readable
                    f = open(filename, "r")
                    f.close()
                    break
                except IOError:
                    continue
        else:
            return None
        try:
            return filename, int(lineno)
        except TypeError:
            return None
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
    """File-like proxy that creates the real OutputWindow on first write."""
    tagdefs = {
        # XXX Should use IdlePrefs.ColorPrefs
        "stdout": {"foreground": "blue"},
        "stderr": {"foreground": "#007700"},
    }
    def __init__(self, flist):
        self.flist = flist
        # created lazily by setup() on the first write()
        self.owin = None
    def write(self, s, tags, mark):
        if not self.owin:
            self.setup()
        self.owin.write(s, tags, mark)
    def setup(self):
        """Create the OutputWindow and configure stdout/stderr color tags."""
        self.owin = owin = OutputWindow(self.flist)
        text = owin.text
        for tag, cnf in self.tagdefs.items():
            if cnf:
                text.tag_configure(tag, **cnf)
        text.tag_raise('sel')
        # Rebind write on the instance: subsequent calls go straight to the
        # real window, skipping the lazy-setup check above.
        self.write = self.owin.write
| gpl-2.0 |
katiecheng/Bombolone | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/sjisprober.py | 1182 | 3734 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Shift_JIS charset prober.

    Combines a byte-level coding state machine (validity of the Shift_JIS
    encoding) with a character distribution analyzer and a context analyzer;
    the final confidence is the max of the two analyzers.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "SHIFT_JIS"
    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the detection state.

        A multi-byte character may straddle chunk boundaries, so the last
        byte of the previous chunk is carried in self._mLastChar and used
        when a character completes at i == 0.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                # invalid byte sequence -> definitely not Shift_JIS
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # a complete character just ended at byte i; charLen is its
                # width in bytes (1 or 2 for Shift_JIS)
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # character spans the chunk boundary: splice with the
                    # carried-over byte from the previous feed() call
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # shortcut: stop probing once confidence is convincingly high
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| bsd-3-clause |
jelugbo/ddi | lms/djangoapps/courseware/tests/test_submitting_problems.py | 8 | 47651 | # -*- coding: utf-8 -*-
"""
Integration tests for submitting problem responses and getting grades.
"""
# text processing dependencies
import json
import os
from textwrap import dedent
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
# Need access to internal func to put users in the right group
from courseware import grades
from courseware.models import StudentModule
#import factories and parent testcase modules
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from capa.tests.response_xml_factory import (
OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
CodeResponseXMLFactory,
)
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from lms.lib.xblock.runtime import quote_slashes
from student.tests.factories import UserFactory
from student.models import anonymous_id_for_user
from xmodule.partitions.partitions import Group, UserPartition
from user_api.tests.factories import UserCourseTagFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Check that a course gets graded properly.

    Base class: creates a course and an enrolled, activated student, and
    provides helpers for building graded sections/problems, submitting
    answers through the xblock handler, and reading back grade/progress
    summaries.
    """
    # arbitrary constant
    COURSE_SLUG = "100"
    COURSE_NAME = "test_course"
    def setUp(self):
        """Create the test course and an enrolled, activated student."""
        super(TestSubmittingProblems, self).setUp(create_user=False)
        # Create course
        self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
        assert self.course, "Couldn't load course %r" % self.COURSE_NAME
        # create a test student
        self.student = 'view@test.com'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.activate_user(self.student)
        self.enroll(self.course)
        self.student_user = User.objects.get(email=self.student)
        self.factory = RequestFactory()
    def refresh_course(self):
        """
        Re-fetch the course from the database so that the object being dealt
        with has everything added to it.
        """
        self.course = self.store.get_course(self.course.id)
    def problem_location(self, problem_url_name):
        """
        Returns the usage key of the problem given the problem's name.
        """
        return self.course.id.make_usage_key('problem', problem_url_name)
    def modx_url(self, problem_location, dispatch):
        """
        Return the url needed for the desired action.

        problem_location: location of the problem on which we want some action
        dispatch: the action string that gets passed to the view as a kwarg
            example: 'check_problem' for having responses processed
        """
        return reverse(
            'xblock_handler',
            kwargs={
                'course_id': self.course.id.to_deprecated_string(),
                'usage_id': quote_slashes(problem_location.to_deprecated_string()),
                'handler': 'xmodule_handler',
                'suffix': dispatch,
            }
        )
    def submit_question_answer(self, problem_url_name, responses):
        """
        Submit answers to a question and return the HTTP response.

        Responses is a dict mapping problem ids to answers:
            {'2_1': 'Correct', '2_2': 'Incorrect'}
        """
        problem_location = self.problem_location(problem_url_name)
        modx_url = self.modx_url(problem_location, 'problem_check')
        answer_key_prefix = 'input_{}_'.format(problem_location.html_id())
        # format the response dictionary to be sent in the post request by adding the above prefix to each key
        response_dict = {(answer_key_prefix + k): v for k, v in responses.items()}
        resp = self.client.post(modx_url, response_dict)
        return resp
    def reset_question_answer(self, problem_url_name):
        """
        Reset specified problem for current user; return the HTTP response.
        """
        problem_location = self.problem_location(problem_url_name)
        modx_url = self.modx_url(problem_location, 'problem_reset')
        resp = self.client.post(modx_url)
        return resp
    def show_question_answer(self, problem_url_name):
        """
        Shows the answer to the current student; return the HTTP response.
        """
        problem_location = self.problem_location(problem_url_name)
        modx_url = self.modx_url(problem_location, 'problem_show')
        resp = self.client.post(modx_url)
        return resp
    def add_dropdown_to_section(self, section_location, name, num_inputs=2):
        """
        Create and return a dropdown problem.

        section_location: location object of section in which to create the problem
            (problems must live in a section to be graded properly)
        name: string name of the problem
        num_inputs: the number of input fields to create in the problem
            (also used as the problem weight)
        """
        prob_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=num_inputs,
            weight=num_inputs,
            options=['Correct', 'Incorrect', u'ⓤⓝⓘⓒⓞⓓⓔ'],
            correct_option='Correct'
        )
        problem = ItemFactory.create(
            parent_location=section_location,
            category='problem',
            data=prob_xml,
            metadata={'rerandomize': 'always'},
            display_name=name
        )
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
        return problem
    def add_graded_section_to_course(self, name, section_format='Homework', late=False, reset=False, showanswer=False):
        """
        Creates a graded homework section within a chapter and returns the section.

        The late/reset/showanswer flags select metadata variants used by the
        submission-error tests (past due date, always-rerandomize, never
        show answer).
        """
        # if we don't already have a chapter create a new one
        if not(hasattr(self, 'chapter')):
            self.chapter = ItemFactory.create(
                parent_location=self.course.location,
                category='chapter'
            )
        if late:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                metadata={'graded': True, 'format': section_format, 'due': '2013-05-20T23:30'}
            )
        elif reset:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                rerandomize='always',
                metadata={
                    'graded': True,
                    'format': section_format,
                }
            )
        elif showanswer:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                showanswer='never',
                metadata={
                    'graded': True,
                    'format': section_format,
                }
            )
        else:
            section = ItemFactory.create(
                parent_location=self.chapter.location,
                display_name=name,
                category='sequential',
                metadata={'graded': True, 'format': section_format}
            )
        # now that we've added the problem and section to the course
        # we fetch the course from the database so the object we are
        # dealing with has these additions
        self.refresh_course()
        return section
    def add_grading_policy(self, grading_policy):
        """
        Add a grading policy to the course.
        """
        self.course.grading_policy = grading_policy
        self.update_course(self.course, self.student_user.id)
        self.refresh_course()
    def get_grade_summary(self):
        """
        calls grades.grade for current user and course.

        the keywords for the returned object are
        - grade : A final letter grade.
        - percent : The final percent for the class (rounded up).
        - section_breakdown : A breakdown of each section that makes
          up the grade. (For display)
        - grade_breakdown : A breakdown of the major components that
          make up the final grade. (For display)
        """
        fake_request = self.factory.get(
            reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
        )
        return grades.grade(self.student_user, fake_request, self.course)
    def get_progress_summary(self):
        """
        Return progress summary structure for current user and course.

        Returns
        - courseware_summary is a summary of all sections with problems in the course.
          It is organized as an array of chapters, each containing an array of sections,
          each containing an array of scores. This contains information for graded and
          ungraded problems, and is good for displaying a course summary with due dates,
          etc.
        """
        fake_request = self.factory.get(
            reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
        )
        progress_summary = grades.progress_summary(
            self.student_user, fake_request, self.course
        )
        return progress_summary
    def check_grade_percent(self, percent):
        """
        Assert that percent grade is as expected.
        """
        grade_summary = self.get_grade_summary()
        self.assertEqual(grade_summary['percent'], percent)
    def earned_hw_scores(self):
        """
        Global scores, each Score is a Problem Set.

        Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
        """
        return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
    def score_for_hw(self, hw_url_name):
        """
        Returns list of scores for a given url.

        Returns list of scores for the given homework:
            [<points on problem_1>, <points on problem_2>, ..., <points on problem_n>]
        """
        # list of grade summaries for each section
        sections_list = []
        for chapter in self.get_progress_summary():
            sections_list.extend(chapter['sections'])
        # get the first section that matches the url (there should only be one)
        hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
        return [s.earned for s in hw_section['scores']]
class TestCourseGrader(TestSubmittingProblems):
"""
Suite of tests for the course grader.
"""
def basic_setup(self, late=False, reset=False, showanswer=False):
"""
Set up a simple course for testing basic grading functionality.
"""
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}],
"GRADE_CUTOFFS": {
'A': .9,
'B': .33
}
}
self.add_grading_policy(grading_policy)
# set up a simple course with four problems
self.homework = self.add_graded_section_to_course('homework', late=late, reset=reset, showanswer=showanswer)
self.add_dropdown_to_section(self.homework.location, 'p1', 1)
self.add_dropdown_to_section(self.homework.location, 'p2', 1)
self.add_dropdown_to_section(self.homework.location, 'p3', 1)
self.refresh_course()
def weighted_setup(self):
"""
Set up a simple course for testing weighted grading functionality.
"""
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 0.25
}, {
"type": "Final",
"name": "Final Section",
"short_label": "Final",
"weight": 0.75
}]
}
self.add_grading_policy(grading_policy)
# set up a structure of 1 homework and 1 final
self.homework = self.add_graded_section_to_course('homework')
self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')
self.final = self.add_graded_section_to_course('Final Section', 'Final')
self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')
def dropping_setup(self):
"""
Set up a simple course for testing the dropping grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 3,
"drop_count": 1,
"short_label": "HW",
"weight": 1
}]
}
self.add_grading_policy(grading_policy)
# Set up a course structure that just consists of 3 homeworks.
# Since the grading policy drops 1 entire homework, each problem is worth 25%
# names for the problem in the homeworks
self.hw1_names = ['h1p1', 'h1p2']
self.hw2_names = ['h2p1', 'h2p2']
self.hw3_names = ['h3p1', 'h3p2']
self.homework1 = self.add_graded_section_to_course('homework1')
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)
self.homework2 = self.add_graded_section_to_course('homework2')
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)
self.homework3 = self.add_graded_section_to_course('homework3')
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)
def test_submission_late(self):
"""Test problem for due date in the past"""
self.basic_setup(late=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_reset(self):
"""Test problem ProcessingErrors due to resets"""
self.basic_setup(reset=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
# submit a second time to draw NotFoundError
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_show_answer(self):
"""Test problem for ProcessingErrors due to showing answer"""
self.basic_setup(showanswer=True)
resp = self.show_question_answer('p1')
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_none_grade(self):
"""
Check grade is 0 to begin with.
"""
self.basic_setup()
self.check_grade_percent(0)
self.assertEqual(self.get_grade_summary()['grade'], None)
    def test_b_grade_exact(self):
        """
        Check that at exactly the cutoff (one of three problems correct), the grade is B.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.check_grade_percent(0.33)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_b_grade_above(self):
        """
        Check that a score strictly between the B and A cutoffs still grades as B.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_a_grade(self):
        """
        Check that 100 percent completion gets an A.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Correct'})
        self.check_grade_percent(1.0)
        self.assertEqual(self.get_grade_summary()['grade'], 'A')
    def test_wrong_answers(self):
        """
        Check that an incorrect answer earns no credit: 2 of 3 correct scores 0.67 / B.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
    def test_submissions_api_overrides_scores(self):
        """
        Check that scores returned by the submissions API override the
        scores stored in StudentModule when computing the grade.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        # Baseline: grading from StudentModule alone gives 0.67 / B.
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_grade_summary()['grade'], 'B')
        # But now we mock out a get_scores call, and watch as it overrides the
        # score read from StudentModule and our student gets an A instead.
        with patch('submissions.api.get_scores') as mock_get_scores:
            mock_get_scores.return_value = {
                self.problem_location('p3').to_deprecated_string(): (1, 1)
            }
            self.check_grade_percent(1.0)
            self.assertEqual(self.get_grade_summary()['grade'], 'A')
    def test_submissions_api_anonymous_student_id(self):
        """
        Check that the submissions API is sent an anonymous student ID,
        never the real user ID.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        with patch('submissions.api.get_scores') as mock_get_scores:
            mock_get_scores.return_value = {
                self.problem_location('p3').to_deprecated_string(): (1, 1)
            }
            # Trigger grading, which is expected to call the submissions API.
            self.get_grade_summary()
            # Verify that the submissions API was sent an anonymized student ID
            mock_get_scores.assert_called_with(
                self.course.id.to_deprecated_string(),
                anonymous_id_for_user(self.student_user, self.course.id)
            )
    def test_weighted_homework(self):
        """
        Test that the homework section has proper weight (0.25 of the total grade).
        """
        self.weighted_setup()
        # Get both parts correct
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(0.25)
        self.assertEqual(self.earned_hw_scores(), [2.0])  # Order matters
        self.assertEqual(self.score_for_hw('homework'), [2.0])
    def test_weighted_exam(self):
        """
        Test that the exam section has the proper weight (0.75 of the total grade).
        """
        self.weighted_setup()
        self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(0.75)
    def test_weighted_total(self):
        """
        Test that the weighted homework (0.25) and exam (0.75) add to 100 percent.
        """
        self.weighted_setup()
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
        self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
        self.check_grade_percent(1.0)
    def dropping_homework_stage1(self):
        """
        Shared fixture stage for the drop-lowest tests: get half the first
        homework correct and all of the second; homework 3 is left untouched.
        """
        self.submit_question_answer(self.hw1_names[0], {'2_1': 'Correct'})
        self.submit_question_answer(self.hw1_names[1], {'2_1': 'Incorrect'})
        for name in self.hw2_names:
            self.submit_question_answer(name, {'2_1': 'Correct'})
    def test_dropping_grades_normally(self):
        """
        Test that the dropping policy does not change things before it should:
        with homework3 still empty, that is the (unique) assignment dropped.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0])  # Order matters
        self.check_grade_percent(0.75)
    def test_dropping_nochange(self):
        """
        Tests that grade does not change when making the global homework grade minimum not unique.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        # Give homework3 the same 50% score as homework1 — the minimum is now tied.
        self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})
        self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
        self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0])  # Order matters
        self.check_grade_percent(0.75)
    def test_dropping_all_correct(self):
        """
        Test that the lowest assignment (homework1, at 50%) is dropped,
        leaving a perfect score.
        """
        self.dropping_setup()
        self.dropping_homework_stage1()
        for name in self.hw3_names:
            self.submit_question_answer(name, {'2_1': 'Correct'})
        self.check_grade_percent(1.0)
        self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 2.0])  # Order matters
        self.assertEqual(self.score_for_hw('homework3'), [1.0, 1.0])
class ProblemWithUploadedFilesTest(TestSubmittingProblems):
    """Tests of problems with uploaded files."""
    def setUp(self):
        """Create a single graded section to hold the file-upload problem."""
        super(ProblemWithUploadedFilesTest, self).setUp()
        self.section = self.add_graded_section_to_course('section')
    def problem_setup(self, name, files):
        """
        Create a CodeResponse problem with files to upload.
        """
        xmldata = CodeResponseXMLFactory().build_xml(
            allowed_files=files, required_files=files,
        )
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            display_name=name,
            data=xmldata
        )
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def test_three_files(self):
        """Submitting three files should POST them all to the XQueue endpoint."""
        # Open the test files, and arrange to close them later.
        filenames = "prog1.py prog2.py prog3.py"
        fileobjs = [
            open(os.path.join(settings.COMMON_TEST_DATA_ROOT, "capa", filename))
            for filename in filenames.split()
        ]
        for fileobj in fileobjs:
            self.addCleanup(fileobj.close)
        self.problem_setup("the_problem", filenames)
        # Mock the XQueue HTTP session so no real grading service is contacted.
        with patch('courseware.module_render.XQUEUE_INTERFACE.session') as mock_session:
            resp = self.submit_question_answer("the_problem", {'2_1': fileobjs})
        self.assertEqual(resp.status_code, 200)
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp['success'], "incorrect")
        # See how post got called.
        name, args, kwargs = mock_session.mock_calls[0]
        self.assertEqual(name, "post")
        self.assertEqual(len(args), 1)
        self.assertTrue(args[0].endswith("/submit/"))
        self.assertItemsEqual(kwargs.keys(), ["files", "data"])
        self.assertItemsEqual(kwargs['files'].keys(), filenames.split())
class TestPythonGradedResponse(TestSubmittingProblems):
    """
    Check that we can submit a schematic and custom response, and it answers properly.
    """
    # Server-side grading script for the schematic problem: samples signal 'Z'
    # of the transient analysis at fixed times and expects 1,1,1,1,1,0,0,0.
    SCHEMATIC_SCRIPT = dedent("""
    # for a schematic response, submission[i] is the json representation
    # of the diagram and analysis results for the i-th schematic tag
    def get_tran(json,signal):
        for element in json:
            if element[0] == 'transient':
                return element[1].get(signal,[])
        return []
    def get_value(at,output):
        for (t,v) in output:
            if at == t: return v
        return None
    output = get_tran(submission[0],'Z')
    okay = True
    # output should be 1, 1, 1, 1, 1, 0, 0, 0
    if get_value(0.0000004, output) < 2.7: okay = False;
    if get_value(0.0000009, output) < 2.7: okay = False;
    if get_value(0.0000014, output) < 2.7: okay = False;
    if get_value(0.0000019, output) < 2.7: okay = False;
    if get_value(0.0000024, output) < 2.7: okay = False;
    if get_value(0.0000029, output) > 0.25: okay = False;
    if get_value(0.0000034, output) > 0.25: okay = False;
    if get_value(0.0000039, output) > 0.25: okay = False;
    correct = ['correct' if okay else 'incorrect']""").strip()
    # A waveform that satisfies every threshold in SCHEMATIC_SCRIPT.
    SCHEMATIC_CORRECT = json.dumps(
        [['transient', {'Z': [
            [0.0000004, 2.8],
            [0.0000009, 2.8],
            [0.0000014, 2.8],
            [0.0000019, 2.8],
            [0.0000024, 2.8],
            [0.0000029, 0.2],
            [0.0000034, 0.2],
            [0.0000039, 0.2]
        ]}]]
    )
    # Same waveform with a single sample deliberately wrong.
    SCHEMATIC_INCORRECT = json.dumps(
        [['transient', {'Z': [
            [0.0000004, 2.8],
            [0.0000009, 0.0],  # wrong.
            [0.0000014, 2.8],
            [0.0000019, 2.8],
            [0.0000024, 2.8],
            [0.0000029, 0.2],
            [0.0000034, 0.2],
            [0.0000039, 0.2]
        ]}]]
    )
    # Check-function script for the custom-response problem: compares two
    # comma-separated lists, ignoring surrounding whitespace and quotes.
    CUSTOM_RESPONSE_SCRIPT = dedent("""
    def test_csv(expect, ans):
        # Take out all spaces in expected answer
        expect = [i.strip(' ') for i in str(expect).split(',')]
        # Take out all spaces in student solution
        ans = [i.strip(' ') for i in str(ans).split(',')]
        def strip_q(x):
            # Strip quotes around strings if students have entered them
            stripped_ans = []
            for item in x:
                if item[0] == "'" and item[-1]=="'":
                    item = item.strip("'")
                elif item[0] == '"' and item[-1] == '"':
                    item = item.strip('"')
                stripped_ans.append(item)
            return stripped_ans
        return strip_q(expect) == strip_q(ans)""").strip()
    CUSTOM_RESPONSE_CORRECT = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
    CUSTOM_RESPONSE_INCORRECT = "Reading my code I see. I hope you like it :)"
    # Answer script for the computed-answer problem: exact string match.
    COMPUTED_ANSWER_SCRIPT = dedent("""
    if submission[0] == "a shout in the street":
        correct = ['correct']
    else:
        correct = ['incorrect']""").strip()
    COMPUTED_ANSWER_CORRECT = "a shout in the street"
    COMPUTED_ANSWER_INCORRECT = "because we never let them in"
    def setUp(self):
        """Create one graded section plus per-problem answer lookup tables."""
        super(TestPythonGradedResponse, self).setUp()
        self.section = self.add_graded_section_to_course('section')
        self.correct_responses = {}
        self.incorrect_responses = {}
    def schematic_setup(self, name):
        """
        set up an example Circuit_Schematic_Builder problem
        """
        script = self.SCHEMATIC_SCRIPT
        xmldata = SchematicResponseXMLFactory().build_xml(answer=script)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='circuitschematic.yaml',
            display_name=name,
            data=xmldata
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = self.SCHEMATIC_CORRECT
        self.incorrect_responses[name] = self.SCHEMATIC_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def custom_response_setup(self, name):
        """
        set up an example custom response problem using a check function
        """
        test_csv = self.CUSTOM_RESPONSE_SCRIPT
        expect = self.CUSTOM_RESPONSE_CORRECT
        cfn_problem_xml = CustomResponseXMLFactory().build_xml(script=test_csv, cfn='test_csv', expect=expect)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='customgrader.yaml',
            data=cfn_problem_xml,
            display_name=name
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = expect
        self.incorrect_responses[name] = self.CUSTOM_RESPONSE_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def computed_answer_setup(self, name):
        """
        set up an example problem using an answer script
        """
        script = self.COMPUTED_ANSWER_SCRIPT
        computed_xml = CustomResponseXMLFactory().build_xml(answer=script)
        ItemFactory.create(
            parent_location=self.section.location,
            category='problem',
            boilerplate='customgrader.yaml',
            data=computed_xml,
            display_name=name
        )
        # define the correct and incorrect responses to this problem
        self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT
        self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT
        # re-fetch the course from the database so the object is up to date
        self.refresh_course()
    def _check_correct(self, name):
        """
        check that problem named "name" marks the known-correct answer correct
        """
        resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'correct')
    def _check_incorrect(self, name):
        """
        check that problem named "name" marks the known-wrong answer incorrect
        """
        resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'incorrect')
    def _check_ireset(self, name):
        """
        Check that the problem can be reset
        """
        # first, get the question wrong (this response is intentionally discarded)
        resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
        # reset the question
        self.reset_question_answer(name)
        # then get it right
        resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
        respdata = json.loads(resp.content)
        self.assertEqual(respdata['success'], 'correct')
    def test_schematic_correct(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_correct(name)
    def test_schematic_incorrect(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_incorrect(name)
    def test_schematic_reset(self):
        name = "schematic_problem"
        self.schematic_setup(name)
        self._check_ireset(name)
    def test_check_function_correct(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_correct(name)
    def test_check_function_incorrect(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_incorrect(name)
    def test_check_function_reset(self):
        name = 'cfn_problem'
        self.custom_response_setup(name)
        self._check_ireset(name)
    def test_computed_correct(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_correct(name)
    def test_computed_incorrect(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_incorrect(name)
    def test_computed_reset(self):
        name = 'computed_answer'
        self.computed_answer_setup(name)
        self._check_ireset(name)
class TestAnswerDistributions(TestSubmittingProblems):
    """Check that we can pull answer distributions for problems."""
    def setUp(self):
        """Set up a simple course with three dropdown problems."""
        super(TestAnswerDistributions, self).setUp()
        self.homework = self.add_graded_section_to_course('homework')
        self.p1_html_id = self.add_dropdown_to_section(self.homework.location, 'p1', 1).location.html_id()
        self.p2_html_id = self.add_dropdown_to_section(self.homework.location, 'p2', 1).location.html_id()
        self.p3_html_id = self.add_dropdown_to_section(self.homework.location, 'p3', 1).location.html_id()
        self.refresh_course()
    def test_empty(self):
        # Just make sure we can process this without errors.
        empty_distribution = grades.answer_distributions(self.course.id)
        self.assertFalse(empty_distribution)  # should be empty
    def test_one_student(self):
        # Basic test to make sure we have simple behavior right for a student
        # Throw in a non-ASCII answer
        self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        distributions = grades.answer_distributions(self.course.id)
        # Keys are (url_name, display_name, answer_id) triples; values map
        # each distinct answer string to its count.
        self.assertEqual(
            distributions,
            {
                ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
                    u'ⓤⓝⓘⓒⓞⓓⓔ': 1
                },
                ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
                    'Correct': 1
                }
            }
        )
    def test_multiple_students(self):
        # Our test class is based around making requests for a particular user,
        # so we're going to cheat by creating another user and copying and
        # modifying StudentModule entries to make them from other users. It's
        # a little hacky, but it seemed the simpler way to do this.
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
        self.submit_question_answer('p3', {'2_1': u'Correct'})
        # Make the above submissions owned by user2
        user2 = UserFactory.create()
        problems = StudentModule.objects.filter(
            course_id=self.course.id,
            student=self.student_user
        )
        for problem in problems:
            problem.student_id = user2.id
            problem.save()
        # Now make more submissions by our original user
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        self.submit_question_answer('p2', {'2_1': u'Correct'})
        self.assertEqual(
            grades.answer_distributions(self.course.id),
            {
                ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
                    'Correct': 2
                },
                ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
                    'Correct': 1,
                    'Incorrect': 1
                },
                ('p3', 'p3', '{}_2_1'.format(self.p3_html_id)): {
                    'Correct': 1
                }
            }
        )
    def test_other_data_types(self):
        # We'll submit one problem, and then muck with the student_answers
        # dict inside its state to try different data types (str, int, float,
        # none)
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the state entry for that problem.
        student_module = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
            state = json.loads(student_module.state)
            state["student_answers"]['{}_2_1'.format(self.p1_html_id)] = val
            student_module.state = json.dumps(state)
            student_module.save()
            # Whatever the stored type, the distribution keys on str(val).
            self.assertEqual(
                grades.answer_distributions(self.course.id),
                {
                    ('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
                        str(val): 1
                    },
                }
            )
    def test_missing_content(self):
        # If there's a StudentModule entry for content that no longer exists,
        # we just quietly ignore it (because we can't display a meaningful url
        # or name for it).
        self.submit_question_answer('p1', {'2_1': 'Incorrect'})
        # Now fetch the state entry for that problem and alter it so it points
        # to a non-existent problem.
        student_module = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        student_module.module_state_key = student_module.module_state_key.replace(
            name=student_module.module_state_key.name + "_fake"
        )
        student_module.save()
        # It should be empty (ignored)
        empty_distribution = grades.answer_distributions(self.course.id)
        self.assertFalse(empty_distribution)  # should be empty
    def test_broken_state(self):
        # Missing or broken state for a problem should be skipped without
        # causing the whole answer_distribution call to explode.
        # Submit p1
        self.submit_question_answer('p1', {'2_1': u'Correct'})
        # Now fetch the StudentModule entry for p1 so we can corrupt its state
        prb1 = StudentModule.objects.get(
            course_id=self.course.id,
            student=self.student_user
        )
        # Submit p2
        self.submit_question_answer('p2', {'2_1': u'Incorrect'})
        for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
            prb1.state = new_p1_state
            prb1.save()
            # p1 won't show up, but p2 should still work
            self.assertEqual(
                grades.answer_distributions(self.course.id),
                {
                    ('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
                        'Incorrect': 1
                    },
                }
            )
class TestConditionalContent(TestSubmittingProblems):
    """
    Check that conditional content works correctly with grading.
    """
    def setUp(self):
        """
        Set up a simple course with a grading policy, a UserPartition, and 2 sections, both graded as "homework".
        One section is pre-populated with a problem (with 2 inputs), visible to all students.
        The second section is empty. Test cases should add conditional content to it.
        """
        super(TestConditionalContent, self).setUp()
        self.user_partition_group_0 = 0
        self.user_partition_group_1 = 1
        self.partition = UserPartition(
            0,
            'first_partition',
            'First Partition',
            [
                Group(self.user_partition_group_0, 'alpha'),
                Group(self.user_partition_group_1, 'beta')
            ]
        )
        self.course = CourseFactory.create(
            display_name=self.COURSE_NAME,
            number=self.COURSE_SLUG,
            user_partitions=[self.partition]
        )
        # Two homeworks, equally weighted, none dropped.
        grading_policy = {
            "GRADER": [{
                "type": "Homework",
                "min_count": 2,
                "drop_count": 0,
                "short_label": "HW",
                "weight": 1.0
            }]
        }
        self.add_grading_policy(grading_policy)
        self.homework_all = self.add_graded_section_to_course('homework1')
        self.p1_all_html_id = self.add_dropdown_to_section(self.homework_all.location, 'H1P1', 2).location.html_id()
        self.homework_conditional = self.add_graded_section_to_course('homework2')
    def split_setup(self, user_partition_group):
        """
        Setup for tests using split_test module. Creates a split_test instance as a child of self.homework_conditional
        with 2 verticals in it, and assigns self.student_user to the specified user_partition_group.
        The verticals are returned.
        """
        vertical_0_url = self.course.id.make_usage_key("vertical", "split_test_vertical_0")
        vertical_1_url = self.course.id.make_usage_key("vertical", "split_test_vertical_1")
        group_id_to_child = {}
        for index, url in enumerate([vertical_0_url, vertical_1_url]):
            group_id_to_child[str(index)] = url
        split_test = ItemFactory.create(
            parent_location=self.homework_conditional.location,
            category="split_test",
            display_name="Split test",
            user_partition_id='0',
            group_id_to_child=group_id_to_child,
        )
        vertical_0 = ItemFactory.create(
            parent_location=split_test.location,
            category="vertical",
            display_name="Condition 0 vertical",
            location=vertical_0_url,
        )
        vertical_1 = ItemFactory.create(
            parent_location=split_test.location,
            category="vertical",
            display_name="Condition 1 vertical",
            location=vertical_1_url,
        )
        # Now add the student to the specified group.
        UserCourseTagFactory(
            user=self.student_user,
            course_id=self.course.id,
            key='xblock.partition_service.partition_{0}'.format(self.partition.id),  # pylint: disable=no-member
            value=str(user_partition_group)
        )
        return vertical_0, vertical_1
    def split_different_problems_setup(self, user_partition_group):
        """
        Setup for the case where the split test instance contains problems for each group
        (so both groups do have graded content, though it is different).
        Group 0 has 2 problems, worth 1 and 3 points respectively.
        Group 1 has 1 problem, worth 1 point.
        This method also assigns self.student_user to the specified user_partition_group and
        then submits answers for the problems in section 1, which are visible to all students.
        The submitted answers give the student 1 point out of a possible 2 points in the section.
        """
        vertical_0, vertical_1 = self.split_setup(user_partition_group)
        # Group 0 will have 2 problems in the section, worth a total of 4 points.
        self.add_dropdown_to_section(vertical_0.location, 'H2P1', 1).location.html_id()
        self.add_dropdown_to_section(vertical_0.location, 'H2P2', 3).location.html_id()
        # Group 1 will have 1 problem in the section, worth a total of 1 point.
        self.add_dropdown_to_section(vertical_1.location, 'H2P1', 1).location.html_id()
        # Submit answers for problem in Section 1, which is visible to all students.
        self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
    def test_split_different_problems_group_0(self):
        """
        Tests that users who see different problems in a split_test module instance are graded correctly.
        This is the test case for a user in user partition group 0.
        """
        self.split_different_problems_setup(self.user_partition_group_0)
        self.submit_question_answer('H2P1', {'2_1': 'Correct'})
        self.submit_question_answer('H2P2', {'2_1': 'Correct', '2_2': 'Incorrect', '2_3': 'Correct'})
        self.assertEqual(self.score_for_hw('homework1'), [1.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0, 2.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 3.0])
        # Grade percent is .63. Here is the calculation
        homework_1_score = 1.0 / 2
        homework_2_score = (1.0 + 2.0) / 4
        self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
    def test_split_different_problems_group_1(self):
        """
        Tests that users who see different problems in a split_test module instance are graded correctly.
        This is the test case for a user in user partition group 1.
        """
        self.split_different_problems_setup(self.user_partition_group_1)
        self.submit_question_answer('H2P1', {'2_1': 'Correct'})
        self.assertEqual(self.score_for_hw('homework1'), [1.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
        # Grade percent is .75. Here is the calculation
        homework_1_score = 1.0 / 2
        homework_2_score = 1.0 / 1
        self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
    def split_one_group_no_problems_setup(self, user_partition_group):
        """
        Setup for the case where the split test instance contains problems only for one group.
        Group 0 has no problems.
        Group 1 has 1 problem, worth 1 point.
        This method also assigns self.student_user to the specified user_partition_group and
        then submits answers for the problems in section 1, which are visible to all students.
        The submitted answers give the student 2 points out of a possible 2 points in the section.
        """
        [_, vertical_1] = self.split_setup(user_partition_group)
        # Group 1 will have 1 problem in the section, worth a total of 1 point.
        self.add_dropdown_to_section(vertical_1.location, 'H2P1', 1).location.html_id()
        self.submit_question_answer('H1P1', {'2_1': 'Correct'})
    def test_split_one_group_no_problems_group_0(self):
        """
        Tests what happens when a given group has no problems in it (students receive 0 for that section).
        """
        self.split_one_group_no_problems_setup(self.user_partition_group_0)
        self.assertEqual(self.score_for_hw('homework1'), [1.0])
        self.assertEqual(self.score_for_hw('homework2'), [])
        self.assertEqual(self.earned_hw_scores(), [1.0, 0.0])
        # Grade percent is .25. Here is the calculation.
        homework_1_score = 1.0 / 2
        homework_2_score = 0.0
        self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
    def test_split_one_group_no_problems_group_1(self):
        """
        Verifies students in the group that DOES have a problem receive a score for their problem.
        """
        self.split_one_group_no_problems_setup(self.user_partition_group_1)
        self.submit_question_answer('H2P1', {'2_1': 'Correct'})
        self.assertEqual(self.score_for_hw('homework1'), [1.0])
        self.assertEqual(self.score_for_hw('homework2'), [1.0])
        self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
        # Grade percent is .75. Here is the calculation.
        homework_1_score = 1.0 / 2
        homework_2_score = 1.0 / 1
        self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
| agpl-3.0 |
bsipocz/astropy | astropy/wcs/docstrings.py | 4 | 66390 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# It gets to be really tedious to type long docstrings in ANSI C
# syntax (since multi-line string literals are not valid).
# Therefore, the docstrings are written here in doc/docstrings.py,
# which are then converted by setup.py into docstrings.h, which is
# included by pywcs.c
__all__ = ['TWO_OR_MORE_ARGS', 'RETURNS', 'ORIGIN', 'RA_DEC_ORDER']
def _fix(content, indent=0):
lines = content.split('\n')
indent = '\n' + ' ' * indent
return indent.join(lines)
def TWO_OR_MORE_ARGS(naxis, indent=0):
    """Return the shared ``args`` parameter docstring for an *naxis*-axis
    transform, re-indented by *indent* spaces via :func:`_fix`."""
    return _fix(
        """args : flexible
        There are two accepted forms for the positional arguments:
        - 2 arguments: An *N* x *{}* array of coordinates, and an
        *origin*.
        - more than 2 arguments: An array for each axis, followed by
        an *origin*. These arrays must be broadcastable to one
        another.
        Here, *origin* is the coordinate in the upper left corner of the
        image. In FITS and Fortran standards, this is 1. In Numpy and C
        standards this is 0.
        """.format(naxis), indent)
def RETURNS(out_type, indent=0):
    """Return the shared ``Returns`` docstring section describing *out_type*,
    re-indented by *indent* spaces via :func:`_fix`."""
    return _fix("""result : array
        Returns the {}. If the input was a single array and
        origin, a single array is returned, otherwise a tuple of arrays is
        returned.""".format(out_type), indent)
def ORIGIN(indent=0):
    """Return the shared ``origin`` parameter docstring, re-indented by
    *indent* spaces via :func:`_fix`."""
    return _fix(
        """
origin : int
    Specifies the origin of pixel values. The Fortran and FITS
    standards use an origin of 1. Numpy and C use array indexing with
    origin at 0.
    """, indent)
def RA_DEC_ORDER(indent=0):
    """Render the shared description of the *ra_dec_order* parameter.

    The text is re-indented by *indent* columns via `_fix`.
    """
    text = """
ra_dec_order : bool, optional
When `True` will ensure that world coordinates are always given
and returned in as (*ra*, *dec*) pairs, regardless of the order of
the axes specified by the in the ``CTYPE`` keywords. Default is
`False`.
"""
    return _fix(text, indent)
# Docstring attached to `astropy.wcs.Sip.a` — the SIP ``A_i_j`` forward
# (pixel -> focal plane) distortion coefficient matrix.
a = """
``double array[a_order+1][a_order+1]`` Focal plane transformation
matrix.
The `SIP`_ ``A_i_j`` matrix used for pixel to focal plane
transformation.
Its values may be changed in place, but it may not be resized, without
creating a new `~astropy.wcs.Sip` object.
"""
# Docstring attached to `astropy.wcs.Sip.a_order` — polynomial order of A_i_j.
a_order = """
``int`` (read-only) Order of the polynomial (``A_ORDER``).
"""
all_pix2world = """
all_pix2world(pixcrd, origin) -> ``double array[ncoord][nelem]``
Transforms pixel coordinates to world coordinates.
Does the following:
- Detector to image plane correction (if present)
- SIP distortion correction (if present)
- FITS WCS distortion correction (if present)
- wcslib "core" WCS transformation
The first three (the distortion corrections) are done in parallel.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{}
Returns
-------
world : double array[ncoord][nelem]
Returns an array of world coordinates.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(ORIGIN())
# Docstring attached to `astropy.wcs.Wcsprm.alt` — the alternate-WCS key
# character (a space for the primary description, or one of A-Z).
alt = """
``str`` Character code for alternate coordinate descriptions.
For example, the ``"a"`` in keyword names such as ``CTYPEia``. This
is a space character for the primary coordinate description, or one of
the 26 upper-case letters, A-Z.
"""
# Docstring attached to `astropy.wcs.Sip.ap` — the SIP ``AP_i_j`` inverse
# (focal plane -> pixel) distortion coefficient matrix.
ap = """
``double array[ap_order+1][ap_order+1]`` Focal plane to pixel
transformation matrix.
The `SIP`_ ``AP_i_j`` matrix used for focal plane to pixel
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
# Docstring attached to `astropy.wcs.Sip.ap_order` — polynomial order of AP_i_j.
ap_order = """
``int`` (read-only) Order of the polynomial (``AP_ORDER``).
"""
axis_types = """
``int array[naxis]`` An array of four-digit type codes for each axis.
- First digit (i.e. 1000s):
- 0: Non-specific coordinate type.
- 1: Stokes coordinate.
- 2: Celestial coordinate (including ``CUBEFACE``).
- 3: Spectral coordinate.
- Second digit (i.e. 100s):
- 0: Linear axis.
- 1: Quantized axis (``STOKES``, ``CUBEFACE``).
- 2: Non-linear celestial axis.
- 3: Non-linear spectral axis.
- 4: Logarithmic axis.
- 5: Tabular axis.
- Third digit (i.e. 10s):
- 0: Group number, e.g. lookup table number
- The fourth digit is used as a qualifier depending on the axis type.
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables: the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
have its type set to -1 and generate an error.
"""
b = """
``double array[b_order+1][b_order+1]`` Pixel to focal plane
transformation matrix.
The `SIP`_ ``B_i_j`` matrix used for pixel to focal plane
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
b_order = """
``int`` (read-only) Order of the polynomial (``B_ORDER``).
"""
bounds_check = """
bounds_check(pix2world, world2pix)
Enable/disable bounds checking.
Parameters
----------
pix2world : bool, optional
When `True`, enable bounds checking for the pixel-to-world (p2x)
transformations. Default is `True`.
world2pix : bool, optional
When `True`, enable bounds checking for the world-to-pixel (s2x)
transformations. Default is `True`.
Notes
-----
Note that by default (without calling `bounds_check`) strict bounds
checking is enabled.
"""
bp = """
``double array[bp_order+1][bp_order+1]`` Focal plane to pixel
transformation matrix.
The `SIP`_ ``BP_i_j`` matrix used for focal plane to pixel
transformation. Its values may be changed in place, but it may not be
resized, without creating a new `~astropy.wcs.Sip` object.
"""
bp_order = """
``int`` (read-only) Order of the polynomial (``BP_ORDER``).
"""
cd = """
``double array[naxis][naxis]`` The ``CDi_ja`` linear transformation
matrix.
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
cdelt = """
``double array[naxis]`` Coordinate increments (``CDELTia``) for each
coord axis.
If a ``CDi_ja`` linear transformation matrix is present, a warning is
raised and `~astropy.wcs.Wcsprm.cdelt` is ignored. The ``CDi_ja``
matrix may be deleted by::
del wcs.wcs.cd
An undefined value is represented by NaN.
"""
cdfix = """
cdfix()
Fix erroneously omitted ``CDi_ja`` keywords.
Sets the diagonal element of the ``CDi_ja`` matrix to unity if all
``CDi_ja`` keywords associated with a given axis were omitted.
According to Paper I, if any ``CDi_ja`` keywords at all are given in a
FITS header then those not given default to zero. This results in a
singular matrix with an intersecting row and column of zeros.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
cel_offset = """
``boolean`` Is there an offset?
If `True`, an offset will be applied to ``(x, y)`` to force ``(x, y) =
(0, 0)`` at the fiducial point, (phi_0, theta_0). Default is `False`.
"""
celfix = """
Translates AIPS-convention celestial projection types, ``-NCP`` and
``-GLS``.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
cname = """
``list of strings`` A list of the coordinate axis names, from
``CNAMEia``.
"""
colax = """
``int array[naxis]`` An array recording the column numbers for each
axis in a pixel list.
"""
colnum = """
``int`` Column of FITS binary table associated with this WCS.
Where the coordinate representation is associated with an image-array
column in a FITS binary table, this property may be used to record the
relevant column number.
It should be set to zero for an image header or pixel list.
"""
compare = """
compare(other, cmp=0, tolerance=0.0)
Compare two Wcsprm objects for equality.
Parameters
----------
other : Wcsprm
The other Wcsprm object to compare to.
cmp : int, optional
A bit field controlling the strictness of the comparison. When 0,
(the default), all fields must be identical.
The following constants may be or'ed together to loosen the
comparison.
- ``WCSCOMPARE_ANCILLARY``: Ignores ancillary keywords that don't
change the WCS transformation, such as ``DATE-OBS`` or
``EQUINOX``.
- ``WCSCOMPARE_TILING``: Ignore integral differences in
``CRPIXja``. This is the 'tiling' condition, where two WCSes
cover different regions of the same map projection and align on
the same map grid.
- ``WCSCOMPARE_CRPIX``: Ignore any differences at all in
``CRPIXja``. The two WCSes cover different regions of the same
map projection but may not align on the same grid map.
Overrides ``WCSCOMPARE_TILING``.
tolerance : float, optional
The amount of tolerance required. For example, for a value of
1e-6, all floating-point values in the objects must be equal to
the first 6 decimal places. The default value of 0.0 implies
exact equality.
Returns
-------
equal : bool
"""
convert = """
convert(array)
Perform the unit conversion on the elements of the given *array*,
returning an array of the same shape.
"""
coord = """
``double array[K_M]...[K_2][K_1][M]`` The tabular coordinate array.
Has the dimensions::
(K_M, ... K_2, K_1, M)
(see `~astropy.wcs.Tabprm.K`) i.e. with the `M` dimension
varying fastest so that the `M` elements of a coordinate vector are
stored contiguously in memory.
"""
copy = """
Creates a deep copy of the WCS object.
"""
cpdis1 = """
`~astropy.wcs.DistortionLookupTable`
The pre-linear transformation distortion lookup table, ``CPDIS1``.
"""
cpdis2 = """
`~astropy.wcs.DistortionLookupTable`
The pre-linear transformation distortion lookup table, ``CPDIS2``.
"""
crder = """
``double array[naxis]`` The random error in each coordinate axis,
``CRDERia``.
An undefined value is represented by NaN.
"""
crota = """
``double array[naxis]`` ``CROTAia`` keyvalues for each coordinate
axis.
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
crpix = """
``double array[naxis]`` Coordinate reference pixels (``CRPIXja``) for
each pixel axis.
"""
crval = """
``double array[naxis]`` Coordinate reference values (``CRVALia``) for
each coordinate axis.
"""
crval_tabprm = """
``double array[M]`` Index values for the reference pixel for each of
the tabular coord axes.
"""
csyer = """
``double array[naxis]`` The systematic error in the coordinate value
axes, ``CSYERia``.
An undefined value is represented by NaN.
"""
ctype = """
``list of strings[naxis]`` List of ``CTYPEia`` keyvalues.
The `~astropy.wcs.Wcsprm.ctype` keyword values must be in upper case
and there must be zero or one pair of matched celestial axis types,
and zero or one spectral axis.
"""
cubeface = """
``int`` Index into the ``pixcrd`` (pixel coordinate) array for the
``CUBEFACE`` axis.
This is used for quadcube projections where the cube faces are stored
on a separate axis.
The quadcube projections (``TSC``, ``CSC``, ``QSC``) may be
represented in FITS in either of two ways:
- The six faces may be laid out in one plane and numbered as
follows::
0
4 3 2 1 4 3 2
5
Faces 2, 3 and 4 may appear on one side or the other (or both).
The world-to-pixel routines map faces 2, 3 and 4 to the left but
the pixel-to-world routines accept them on either side.
- The ``COBE`` convention in which the six faces are stored in a
three-dimensional structure using a ``CUBEFACE`` axis indexed
from 0 to 5 as above.
These routines support both methods; `~astropy.wcs.Wcsprm.set`
determines which is being used by the presence or absence of a
``CUBEFACE`` axis in `~astropy.wcs.Wcsprm.ctype`.
`~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` translate the
``CUBEFACE`` axis representation to the single plane representation
understood by the lower-level projection routines.
"""
cunit = """
``list of astropy.UnitBase[naxis]`` List of ``CUNITia`` keyvalues as
`astropy.units.UnitBase` instances.
These define the units of measurement of the ``CRVALia``, ``CDELTia``
and ``CDi_ja`` keywords.
As ``CUNITia`` is an optional header keyword,
`~astropy.wcs.Wcsprm.cunit` may be left blank but otherwise is
expected to contain a standard units specification as defined by WCS
Paper I. `~astropy.wcs.Wcsprm.unitfix` is available to translate
commonly used non-standard units specifications but this must be done
as a separate step before invoking `~astropy.wcs.Wcsprm.set`.
For celestial axes, if `~astropy.wcs.Wcsprm.cunit` is not blank,
`~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale
`~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
`~astropy.wcs.Wcsprm.cd` to decimal degrees. It then resets
`~astropy.wcs.Wcsprm.cunit` to ``"deg"``.
For spectral axes, if `~astropy.wcs.Wcsprm.cunit` is not blank,
`~astropy.wcs.Wcsprm.set` uses ``wcsunits`` to parse it and scale
`~astropy.wcs.Wcsprm.cdelt`, `~astropy.wcs.Wcsprm.crval`, and
`~astropy.wcs.Wcsprm.cd` to SI units. It then resets
`~astropy.wcs.Wcsprm.cunit` accordingly.
`~astropy.wcs.Wcsprm.set` ignores `~astropy.wcs.Wcsprm.cunit` for
other coordinate types; `~astropy.wcs.Wcsprm.cunit` may be used to
label coordinate values.
"""
cylfix = """
cylfix()
Fixes WCS keyvalues for malformed cylindrical projections.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
data = """
``float array`` The array data for the
`~astropy.wcs.DistortionLookupTable`.
"""
data_wtbarr = """
``double array``
The array data for the BINTABLE.
"""
dateavg = """
``string`` Representative mid-point of the date of observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
dateobs = """
``string`` Start of the date of observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateavg
"""
datfix = """
datfix()
Translates the old ``DATE-OBS`` date format to year-2000 standard form
``(yyyy-mm-ddThh:mm:ss)`` and derives ``MJD-OBS`` from it if not
already set.
Alternatively, if `~astropy.wcs.Wcsprm.mjdobs` is set and
`~astropy.wcs.Wcsprm.dateobs` isn't, then `~astropy.wcs.Wcsprm.datfix`
derives `~astropy.wcs.Wcsprm.dateobs` from it. If both are set but
disagree by more than half a day then `ValueError` is raised.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
delta = """
``double array[M]`` (read-only) Interpolated indices into the coord
array.
Array of interpolated indices into the coordinate array such that
Upsilon_m, as defined in Paper III, is equal to
(`~astropy.wcs.Tabprm.p0` [m] + 1) + delta[m].
"""
det2im = """
Convert detector coordinates to image plane coordinates.
"""
det2im1 = """
A `~astropy.wcs.DistortionLookupTable` object for detector to image plane
correction in the *x*-axis.
"""
det2im2 = """
A `~astropy.wcs.DistortionLookupTable` object for detector to image plane
correction in the *y*-axis.
"""
dims = """
``int array[ndim]`` (read-only)
The dimensions of the tabular array
`~astropy.wcs.Wtbarr.data`.
"""
DistortionLookupTable = """
DistortionLookupTable(*table*, *crpix*, *crval*, *cdelt*)
Represents a single lookup table for a `distortion paper`_
transformation.
Parameters
----------
table : 2-dimensional array
The distortion lookup table.
crpix : 2-tuple
The distortion array reference pixel
crval : 2-tuple
The image array pixel coordinate
cdelt : 2-tuple
The grid step size
"""
equinox = """
``double`` The equinox associated with dynamical equatorial or
ecliptic coordinate systems.
``EQUINOXa`` (or ``EPOCH`` in older headers). Not applicable to ICRS
equatorial or ecliptic coordinates.
An undefined value is represented by NaN.
"""
extlev = """
``int`` (read-only)
``EXTLEV`` identifying the binary table extension.
"""
extnam = """
``str`` (read-only)
``EXTNAME`` identifying the binary table extension.
"""
extrema = """
``double array[K_M]...[K_2][2][M]`` (read-only)
An array recording the minimum and maximum value of each element of
the coordinate vector in each row of the coordinate array, with the
dimensions::
(K_M, ... K_2, 2, M)
(see `~astropy.wcs.Tabprm.K`). The minimum is recorded
in the first element of the compressed K_1 dimension, then the
maximum. This array is used by the inverse table lookup function to
speed up table searches.
"""
extver = """
``int`` (read-only)
``EXTVER`` identifying the binary table extension.
"""
find_all_wcs = """
find_all_wcs(relax=0, keysel=0)
Find all WCS transformations in the header.
Parameters
----------
header : str
The raw FITS header data.
relax : bool or int
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to accept. See
:ref:`relaxread` for details.
keysel : sequence of flags
Used to restrict the keyword types considered:
- ``WCSHDR_IMGHEAD``: Image header keywords.
- ``WCSHDR_BIMGARR``: Binary table image array.
- ``WCSHDR_PIXLIST``: Pixel list keywords.
If zero, there is no restriction. If -1, `wcspih` is called,
rather than `wcstbh`.
Returns
-------
wcs_list : list of `~astropy.wcs.Wcsprm` objects
"""
fix = """
fix(translate_units='', naxis=0)
Applies all of the corrections handled separately by
`~astropy.wcs.Wcsprm.datfix`, `~astropy.wcs.Wcsprm.unitfix`,
`~astropy.wcs.Wcsprm.celfix`, `~astropy.wcs.Wcsprm.spcfix`,
`~astropy.wcs.Wcsprm.cylfix` and `~astropy.wcs.Wcsprm.cdfix`.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of non-standard unit
strings to perform. By default, performs all.
Although ``"S"`` is commonly used to represent seconds, its
translation to ``"s"`` is potentially unsafe since the standard
recognizes ``"S"`` formally as Siemens, however rarely that may be
used. The same applies to ``"H"`` for hours (Henry), and ``"D"``
for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to ``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to ``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to ``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'``
does all of them.
naxis : int array[naxis], optional
Image axis lengths. If this array is set to zero or ``None``,
then `~astropy.wcs.Wcsprm.cylfix` will not be invoked.
Returns
-------
status : dict
Returns a dictionary containing the following keys, each referring
to a status string for each of the sub-fix functions that were
called:
- `~astropy.wcs.Wcsprm.cdfix`
- `~astropy.wcs.Wcsprm.datfix`
- `~astropy.wcs.Wcsprm.unitfix`
- `~astropy.wcs.Wcsprm.celfix`
- `~astropy.wcs.Wcsprm.spcfix`
- `~astropy.wcs.Wcsprm.cylfix`
"""
get_offset = """
get_offset(x, y) -> (x, y)
Returns the offset as defined in the distortion lookup table.
Returns
-------
coordinate : coordinate pair
The offset from the distortion table for pixel point (*x*, *y*).
"""
get_cdelt = """
get_cdelt() -> double array[naxis]
Coordinate increments (``CDELTia``) for each coord axis.
Returns the ``CDELT`` offsets in read-only form. Unlike the
`~astropy.wcs.Wcsprm.cdelt` property, this works even when the header
specifies the linear transformation matrix in one of the alternative
``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access
to the linear transformation matrix, but don't care how it was
specified in the header.
"""
get_pc = """
get_pc() -> double array[naxis][naxis]
Returns the ``PC`` matrix in read-only form. Unlike the
`~astropy.wcs.Wcsprm.pc` property, this works even when the header
specifies the linear transformation matrix in one of the alternative
``CDi_ja`` or ``CROTAia`` forms. This is useful when you want access
to the linear transformation matrix, but don't care how it was
specified in the header.
"""
get_ps = """
get_ps() -> list of tuples
Returns ``PSi_ma`` keywords for each *i* and *m*.
Returns
-------
ps : list of tuples
Returned as a list of tuples of the form (*i*, *m*, *value*):
- *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.set_ps : Set ``PSi_ma`` values
"""
get_pv = """
get_pv() -> list of tuples
Returns ``PVi_ma`` keywords for each *i* and *m*.
Returns
-------
Returned as a list of tuples of the form (*i*, *m*, *value*):
- *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.set_pv : Set ``PVi_ma`` values
Notes
-----
Note that, if they were not given, `~astropy.wcs.Wcsprm.set` resets
the entries for ``PVi_1a``, ``PVi_2a``, ``PVi_3a``, and ``PVi_4a`` for
longitude axis *i* to match (``phi_0``, ``theta_0``), the native
longitude and latitude of the reference point given by ``LONPOLEa``
and ``LATPOLEa``.
"""
has_cd = """
has_cd() -> bool
Returns `True` if ``CDi_ja`` is present.
``CDi_ja`` is an alternate specification of the linear transformation
matrix, maintained for historical compatibility.
Matrix elements in the IRAF convention are equivalent to the product
``CDi_ja = CDELTia * PCi_ja``, but the defaults differ from that of
the ``PCi_ja`` matrix. If one or more ``CDi_ja`` keywords are present
then all unspecified ``CDi_ja`` default to zero. If no ``CDi_ja`` (or
``CROTAia``) keywords are present, then the header is assumed to be in
``PCi_ja`` form whether or not any ``PCi_ja`` keywords are present
since this results in an interpretation of ``CDELTia`` consistent with
the original FITS specification.
While ``CDi_ja`` may not formally co-exist with ``PCi_ja``, it may
co-exist with ``CDELTia`` and ``CROTAia`` which are to be ignored.
See also
--------
astropy.wcs.Wcsprm.cd : Get the raw ``CDi_ja`` values.
"""
has_cdi_ja = """
has_cdi_ja() -> bool
Alias for `~astropy.wcs.Wcsprm.has_cd`. Maintained for backward
compatibility.
"""
has_crota = """
has_crota() -> bool
Returns `True` if ``CROTAia`` is present.
``CROTAia`` is an alternate specification of the linear transformation
matrix, maintained for historical compatibility.
In the AIPS convention, ``CROTAia`` may only be associated with the
latitude axis of a celestial axis pair. It specifies a rotation in
the image plane that is applied *after* the ``CDELTia``; any other
``CROTAia`` keywords are ignored.
``CROTAia`` may not formally co-exist with ``PCi_ja``. ``CROTAia`` and
``CDELTia`` may formally co-exist with ``CDi_ja`` but if so are to be
ignored.
See also
--------
astropy.wcs.Wcsprm.crota : Get the raw ``CROTAia`` values
"""
has_crotaia = """
has_crotaia() -> bool
Alias for `~astropy.wcs.Wcsprm.has_crota`. Maintained for backward
compatibility.
"""
has_pc = """
has_pc() -> bool
Returns `True` if ``PCi_ja`` is present. ``PCi_ja`` is the
recommended way to specify the linear transformation matrix.
See also
--------
astropy.wcs.Wcsprm.pc : Get the raw ``PCi_ja`` values
"""
has_pci_ja = """
has_pci_ja() -> bool
Alias for `~astropy.wcs.Wcsprm.has_pc`. Maintained for backward
compatibility.
"""
i = """
``int`` (read-only)
Image axis number.
"""
imgpix_matrix = """
``double array[2][2]`` (read-only) Inverse of the ``CDELT`` or ``PC``
matrix.
Inverse containing the product of the ``CDELTia`` diagonal matrix and
the ``PCi_ja`` matrix.
"""
is_unity = """
is_unity() -> bool
Returns `True` if the linear transformation matrix
(`~astropy.wcs.Wcsprm.cd`) is unity.
"""
K = """
``int array[M]`` (read-only) The lengths of the axes of the coordinate
array.
An array of length `M` whose elements record the lengths of the axes of
the coordinate array and of each indexing vector.
"""
kind = """
``str`` (read-only)
Character identifying the wcstab array type:
- ``'c'``: coordinate array,
- ``'i'``: index vector.
"""
lat = """
``int`` (read-only) The index into the world coord array containing
latitude values.
"""
latpole = """
``double`` The native latitude of the celestial pole, ``LATPOLEa`` (deg).
"""
lattyp = """
``string`` (read-only) Celestial axis type for latitude.
For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--",
"DEC-", "GLON", "GLAT", etc. in the first four characters of
``CTYPEia`` but with trailing dashes removed.
"""
lng = """
``int`` (read-only) The index into the world coord array containing
longitude values.
"""
lngtyp = """
``string`` (read-only) Celestial axis type for longitude.
For example, "RA", "DEC", "GLON", "GLAT", etc. extracted from "RA--",
"DEC-", "GLON", "GLAT", etc. in the first four characters of
``CTYPEia`` but with trailing dashes removed.
"""
lonpole = """
``double`` The native longitude of the celestial pole.
``LONPOLEa`` (deg).
"""
M = """
``int`` (read-only) Number of tabular coordinate axes.
"""
m = """
``int`` (read-only)
Array axis number for index vectors.
"""
map = """
``int array[M]`` Association between axes.
A vector of length `~astropy.wcs.Tabprm.M` that defines
the association between axis *m* in the *M*-dimensional coordinate
array (1 <= *m* <= *M*) and the indices of the intermediate world
coordinate and world coordinate arrays.
When the intermediate and world coordinate arrays contain the full
complement of coordinate elements in image-order, as will usually be
the case, then ``map[m-1] == i-1`` for axis *i* in the *N*-dimensional
image (1 <= *i* <= *N*). In terms of the FITS keywords::
map[PVi_3a - 1] == i - 1.
However, a different association may result if the intermediate
coordinates, for example, only contains a (relevant) subset of
intermediate world coordinate elements. For example, if *M* == 1 for
an image with *N* > 1, it is possible to fill the intermediate
coordinates with the relevant coordinate element with ``nelem`` set to
1. In this case ``map[0] = 0`` regardless of the value of *i*.
"""
mix = """
mix(mixpix, mixcel, vspan, vstep, viter, world, pixcrd, origin)
Given either the celestial longitude or latitude plus an element of
the pixel coordinate, solves for the remaining elements by iterating
on the unknown celestial coordinate element using
`~astropy.wcs.Wcsprm.s2p`.
Parameters
----------
mixpix : int
Which element on the pixel coordinate is given.
mixcel : int
Which element of the celestial coordinate is given. If *mixcel* =
``1``, celestial longitude is given in ``world[self.lng]``,
latitude returned in ``world[self.lat]``. If *mixcel* = ``2``,
celestial latitude is given in ``world[self.lat]``, longitude
returned in ``world[self.lng]``.
vspan : pair of floats
Solution interval for the celestial coordinate, in degrees. The
ordering of the two limits is irrelevant. Longitude ranges may be
specified with any convenient normalization, for example
``(-120,+120)`` is the same as ``(240,480)``, except that the
solution will be returned with the same normalization, i.e. lie
within the interval specified.
vstep : float
Step size for solution search, in degrees. If ``0``, a sensible,
although perhaps non-optimal default will be used.
viter : int
If a solution is not found then the step size will be halved and
the search recommenced. *viter* controls how many times the step
size is halved. The allowed range is 5 - 10.
world : double array[naxis]
World coordinate elements. ``world[self.lng]`` and
``world[self.lat]`` are the celestial longitude and latitude, in
degrees. Which is given and which returned depends on the value
of *mixcel*. All other elements are given. The results will be
written to this array in-place.
pixcrd : double array[naxis].
Pixel coordinates. The element indicated by *mixpix* is given and
the remaining elements will be written in-place.
{}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *phi* (double array[naxis])
- *theta* (double array[naxis])
- Longitude and latitude in the native coordinate system of
the projection, in degrees.
- *imgcrd* (double array[naxis])
- Image coordinate elements. ``imgcrd[self.lng]`` and
``imgcrd[self.lat]`` are the projected *x*- and
*y*-coordinates, in decimal degrees.
- *world* (double array[naxis])
- Another reference to the *world* argument passed in.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
InvalidCoordinateError
Invalid world coordinate.
NoSolutionError
No solution found in the specified interval.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Get the axes numbers for latitude and longitude
Notes
-----
Initially, the specified solution interval is checked to see if it's a
\"crossing\" interval. If it isn't, a search is made for a crossing
solution by iterating on the unknown celestial coordinate starting at
the upper limit of the solution interval and decrementing by the
specified step size. A crossing is indicated if the trial value of
the pixel coordinate steps through the value specified. If a crossing
interval is found then the solution is determined by a modified form
of \"regula falsi\" division of the crossing interval. If no crossing
interval was found within the specified solution interval then a
search is made for a \"non-crossing\" solution as may arise from a
point of tangency. The process is complicated by having to make
allowance for the discontinuities that occur in all map projections.
Once one solution has been determined others may be found by
subsequent invocations of `~astropy.wcs.Wcsprm.mix` with suitably
restricted solution intervals.
Note the circumstance that arises when the solution point lies at a
native pole of a projection in which the pole is represented as a
finite curve, for example the zenithals and conics. In such cases two
or more valid solutions may exist but `~astropy.wcs.Wcsprm.mix` only
ever returns one.
Because of its generality, `~astropy.wcs.Wcsprm.mix` is very
compute-intensive. For compute-limited applications, more efficient
special-case solvers could be written for simple projections, for
example non-oblique cylindrical projections.
""".format(ORIGIN())
mjdavg = """
``double`` Modified Julian Date corresponding to ``DATE-AVG``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdobs
"""
mjdobs = """
``double`` Modified Julian Date corresponding to ``DATE-OBS``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdavg
"""
name = """
``string`` The name given to the coordinate representation
``WCSNAMEa``.
"""
naxis = """
``int`` (read-only) The number of axes (pixel and coordinate).
Given by the ``NAXIS`` or ``WCSAXESa`` keyvalues.
The number of coordinate axes is determined at parsing time, and can
not be subsequently changed.
It is determined from the highest of the following:
1. ``NAXIS``
2. ``WCSAXESa``
3. The highest axis number in any parameterized WCS keyword. The
keyvalue, as well as the keyword, must be syntactically valid
otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header only
contains auxiliary WCS keywords for a particular coordinate
representation, then no coordinate description is constructed for it.
This value may differ for different coordinate representations of the
same image.
"""
nc = """
``int`` (read-only) Total number of coord vectors in the coord array.
Total number of coordinate vectors in the coordinate array being the
product K_1 * K_2 * ... * K_M.
"""
ndim = """
``int`` (read-only)
Expected dimensionality of the wcstab array.
"""
obsgeo = """
``double array[3]`` Location of the observer in a standard terrestrial
reference frame.
``OBSGEO-X``, ``OBSGEO-Y``, ``OBSGEO-Z`` (in meters).
An undefined value is represented by NaN.
"""
p0 = """
``int array[M]`` Interpolated indices into the coordinate array.
Vector of length `~astropy.wcs.Tabprm.M` of interpolated
indices into the coordinate array such that Upsilon_m, as defined in
Paper III, is equal to ``(p0[m] + 1) + delta[m]``.
"""
p2s = """
p2s(pixcrd, origin)
Converts pixel to world coordinates.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *imgcrd*: double array[ncoord][nelem]
- Array of intermediate world coordinates. For celestial axes,
``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the
projected *x*-, and *y*-coordinates, in pseudo degrees. For
spectral axes, ``imgcrd[][self.spec]`` is the intermediate
spectral coordinate, in SI units.
- *phi*: double array[ncoord]
- *theta*: double array[ncoord]
- Longitude and latitude in the native coordinate system of the
projection, in degrees.
- *world*: double array[ncoord][nelem]
- Array of world coordinates. For celestial axes,
``world[][self.lng]`` and ``world[][self.lat]`` are the
celestial longitude and latitude, in degrees. For spectral
axes, ``world[][self.spec]`` is the intermediate spectral
coordinate, in SI units.
- *stat*: int array[ncoord]
- Status return value for each coordinate. ``0`` for success,
``1+`` for invalid pixel coordinate.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
*x*- and *y*-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Definition of the latitude and longitude axes
""".format(ORIGIN())
p4_pix2foc = """
p4_pix2foc(*pixcrd, origin*) -> double array[ncoord][nelem]
Convert pixel coordinates to focal plane coordinates using `distortion
paper`_ lookup-table correction.
Parameters
----------
pixcrd : double array[ncoord][nelem].
Array of pixel coordinates.
{}
Returns
-------
foccrd : double array[ncoord][nelem]
Returns an array of focal plane coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
pc = """
``double array[naxis][naxis]`` The ``PCi_ja`` (pixel coordinate)
transformation matrix.
The order is::
[[PC1_1, PC1_2],
[PC2_1, PC2_2]]
For historical compatibility, three alternate specifications of the
linear transformations are available in wcslib. The canonical
``PCi_ja`` with ``CDELTia``, ``CDi_ja``, and the deprecated
``CROTAia`` keywords. Although the latter may not formally co-exist
with ``PCi_ja``, the approach here is simply to ignore them if given
in conjunction with ``PCi_ja``.
`~astropy.wcs.Wcsprm.has_pc`, `~astropy.wcs.Wcsprm.has_cd` and
`~astropy.wcs.Wcsprm.has_crota` can be used to determine which of
these alternatives are present in the header.
These alternate specifications of the linear transformation matrix are
translated immediately to ``PCi_ja`` by `~astropy.wcs.Wcsprm.set` and
are nowhere visible to the lower-level routines. In particular,
`~astropy.wcs.Wcsprm.set` resets `~astropy.wcs.Wcsprm.cdelt` to unity
if ``CDi_ja`` is present (and no ``PCi_ja``). If no ``CROTAia`` is
associated with the latitude axis, `~astropy.wcs.Wcsprm.set` reverts
to a unity ``PCi_ja`` matrix.
"""
phi0 = """
``double`` The native latitude of the fiducial point.
The point whose celestial coordinates are given in ``ref[1:2]``. If
undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`,
will set this to a projection-specific default.
See also
--------
astropy.wcs.Wcsprm.theta0
"""
pix2foc = """
pix2foc(*pixcrd, origin*) -> double array[ncoord][nelem]
Perform both `SIP`_ polynomial and `distortion paper`_ lookup-table
correction in parallel.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{}
Returns
-------
foccrd : double array[ncoord][nelem]
Returns an array of focal plane coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
piximg_matrix = """
``double array[2][2]`` (read-only) Matrix containing the product of
the ``CDELTia`` diagonal matrix and the ``PCi_ja`` matrix.
"""
print_contents = """
print_contents()
Print the contents of the `~astropy.wcs.Wcsprm` object to stdout.
Probably only useful for debugging purposes, and may be removed in the
future.
To get a string of the contents, use `repr`.
"""
print_contents_tabprm = """
print_contents()
Print the contents of the `~astropy.wcs.Tabprm` object to
stdout. Probably only useful for debugging purposes, and may be
removed in the future.
To get a string of the contents, use `repr`.
"""
radesys = """
``string`` The equatorial or ecliptic coordinate system type,
``RADESYSa``.
"""
restfrq = """
``double`` Rest frequency (Hz) from ``RESTFRQa``.
An undefined value is represented by NaN.
"""
restwav = """
``double`` Rest wavelength (m) from ``RESTWAVa``.
An undefined value is represented by NaN.
"""
row = """
``int`` (read-only)
Table row number.
"""
s2p = """
s2p(world, origin)
Transforms world coordinates to pixel coordinates.
Parameters
----------
world : double array[ncoord][nelem]
Array of world coordinates, in decimal degrees.
{}
Returns
-------
result : dict
Returns a dictionary with the following keys:
- *phi*: double array[ncoord]
- *theta*: double array[ncoord]
- Longitude and latitude in the native coordinate system of
the projection, in degrees.
- *imgcrd*: double array[ncoord][nelem]
- Array of intermediate world coordinates. For celestial axes,
``imgcrd[][self.lng]`` and ``imgcrd[][self.lat]`` are the
projected *x*-, and *y*-coordinates, in pseudo \"degrees\".
For quadcube projections with a ``CUBEFACE`` axis, the face
number is also returned in ``imgcrd[][self.cubeface]``. For
spectral axes, ``imgcrd[][self.spec]`` is the intermediate
spectral coordinate, in SI units.
- *pixcrd*: double array[ncoord][nelem]
- Array of pixel coordinates. Pixel coordinates are
zero-based.
- *stat*: int array[ncoord]
- Status return value for each coordinate. ``0`` for success,
``1+`` for invalid pixel coordinate.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
See also
--------
astropy.wcs.Wcsprm.lat, astropy.wcs.Wcsprm.lng
Definition of the latitude and longitude axes
""".format(ORIGIN())
sense = """
``int array[M]`` +1 if monotonically increasing, -1 if decreasing.
A vector of length `~astropy.wcs.Tabprm.M` whose elements
indicate whether the corresponding indexing vector is monotonically
increasing (+1), or decreasing (-1).
"""
set = """
set()
Sets up a WCS object for use according to information supplied within
it.
Note that this routine need not be called directly; it will be invoked
by `~astropy.wcs.Wcsprm.p2s` and `~astropy.wcs.Wcsprm.s2p` if
necessary.
Some attributes that are based on other attributes (such as
`~astropy.wcs.Wcsprm.lattyp` on `~astropy.wcs.Wcsprm.ctype`) may not
be correct until after `~astropy.wcs.Wcsprm.set` is called.
`~astropy.wcs.Wcsprm.set` strips off trailing blanks in all string
members.
`~astropy.wcs.Wcsprm.set` recognizes the ``NCP`` projection and
converts it to the equivalent ``SIN`` projection and it also
recognizes ``GLS`` as a synonym for ``SFL``. It does alias
translation for the AIPS spectral types (``FREQ-LSR``, ``FELO-HEL``,
etc.) but without changing the input header keywords.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
set_tabprm = """
set()
Allocates memory for work arrays.
Also sets up the class according to information supplied within it.
Note that this routine need not be called directly; it will be invoked
by functions that need it.
Raises
------
MemoryError
Memory allocation failed.
InvalidTabularParameters
Invalid tabular parameters.
"""
set_ps = """
set_ps(ps)
Sets ``PSi_ma`` keywords for each *i* and *m*.
Parameters
----------
ps : sequence of tuples
The input must be a sequence of tuples of the form (*i*, *m*,
*value*):
- *i*: int. Axis number, as in ``PSi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PSi_ma``, (i.e. 0-relative)
- *value*: string. Parameter value.
See also
--------
astropy.wcs.Wcsprm.get_ps
"""
set_pv = """
set_pv(pv)
Sets ``PVi_ma`` keywords for each *i* and *m*.
Parameters
----------
pv : list of tuples
The input must be a sequence of tuples of the form (*i*, *m*,
*value*):
- *i*: int. Axis number, as in ``PVi_ma``, (i.e. 1-relative)
- *m*: int. Parameter number, as in ``PVi_ma``, (i.e. 0-relative)
- *value*: float. Parameter value.
See also
--------
astropy.wcs.Wcsprm.get_pv
"""
sip = """
Get/set the `~astropy.wcs.Sip` object for performing `SIP`_ distortion
correction.
"""
Sip = """
Sip(*a, b, ap, bp, crpix*)
The `~astropy.wcs.Sip` class performs polynomial distortion correction
using the `SIP`_ convention in both directions.
Parameters
----------
a : double array[m+1][m+1]
The ``A_i_j`` polynomial for pixel to focal plane transformation.
Its size must be (*m* + 1, *m* + 1) where *m* = ``A_ORDER``.
b : double array[m+1][m+1]
The ``B_i_j`` polynomial for pixel to focal plane transformation.
Its size must be (*m* + 1, *m* + 1) where *m* = ``B_ORDER``.
ap : double array[m+1][m+1]
The ``AP_i_j`` polynomial for pixel to focal plane transformation.
Its size must be (*m* + 1, *m* + 1) where *m* = ``AP_ORDER``.
bp : double array[m+1][m+1]
The ``BP_i_j`` polynomial for pixel to focal plane transformation.
Its size must be (*m* + 1, *m* + 1) where *m* = ``BP_ORDER``.
crpix : double array[2]
The reference pixel.
Notes
-----
Shupe, D. L., M. Moshir, J. Li, D. Makovoz and R. Narron. 2005.
"The SIP Convention for Representing Distortion in FITS Image
Headers." ADASS XIV.
"""
sip_foc2pix = """
sip_foc2pix(*foccrd, origin*) -> double array[ncoord][nelem]
Convert focal plane coordinates to pixel coordinates using the `SIP`_
polynomial distortion convention.
Parameters
----------
foccrd : double array[ncoord][nelem]
Array of focal plane coordinates.
{}
Returns
-------
pixcrd : double array[ncoord][nelem]
Returns an array of pixel coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
sip_pix2foc = """
sip_pix2foc(*pixcrd, origin*) -> double array[ncoord][nelem]
Convert pixel coordinates to focal plane coordinates using the `SIP`_
polynomial distortion convention.
Parameters
----------
pixcrd : double array[ncoord][nelem]
Array of pixel coordinates.
{}
Returns
-------
foccrd : double array[ncoord][nelem]
Returns an array of focal plane coordinates.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(ORIGIN())
spcfix = """
spcfix() -> int
Translates AIPS-convention spectral coordinate types. {``FREQ``,
``VELO``, ``FELO``}-{``OBS``, ``HEL``, ``LSR``} (e.g. ``FREQ-LSR``,
``VELO-OBS``, ``FELO-HEL``)
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
spec = """
``int`` (read-only) The index containing the spectral axis values.
"""
specsys = """
``string`` Spectral reference frame (standard of rest), ``SPECSYSa``.
See also
--------
astropy.wcs.Wcsprm.ssysobs, astropy.wcs.Wcsprm.velosys
"""
sptr = """
sptr(ctype, i=-1)
Translates the spectral axis in a WCS object.
For example, a ``FREQ`` axis may be translated into ``ZOPT-F2W`` and
vice versa.
Parameters
----------
ctype : str
Required spectral ``CTYPEia``, maximum of 8 characters. The first
four characters are required to be given and are never modified.
The remaining four, the algorithm code, are completely determined
by, and must be consistent with, the first four characters.
Wildcarding may be used, i.e. if the final three characters are
specified as ``\"???\"``, or if just the eighth character is
specified as ``\"?\"``, the correct algorithm code will be
substituted and returned.
i : int
Index of the spectral axis (0-relative). If ``i < 0`` (or not
provided), it will be set to the first spectral axis identified
from the ``CTYPE`` keyvalues in the FITS header.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
InvalidSubimageSpecificationError
Invalid subimage specification (no spectral axis).
"""
ssysobs = """
``string`` Spectral reference frame.
The spectral reference frame in which there is no differential
variation in the spectral coordinate across the field-of-view,
``SSYSOBSa``.
See also
--------
astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.velosys
"""
ssyssrc = """
``string`` Spectral reference frame for redshift.
The spectral reference frame (standard of rest) in which the redshift
was measured, ``SSYSSRCa``.
"""
sub = """
sub(axes)
Extracts the coordinate description for a subimage from a
`~astropy.wcs.WCS` object.
The world coordinate system of the subimage must be separable in the
sense that the world coordinates at any point in the subimage must
depend only on the pixel coordinates of the axes extracted. In
practice, this means that the ``PCi_ja`` matrix of the original image
must not contain non-zero off-diagonal terms that associate any of the
subimage axes with any of the non-subimage axes.
`sub` can also add axes to a wcsprm object. The new axes will be
created using the defaults set by the Wcsprm constructor which produce
a simple, unnamed, linear axis with world coordinates equal to the
pixel coordinate. These default values can be changed before
invoking `set`.
Parameters
----------
axes : int or a sequence.
- If an int, include the first *N* axes in their original order.
- If a sequence, may contain a combination of image axis numbers
(1-relative) or special axis identifiers (see below). Order is
significant; ``axes[0]`` is the axis number of the input image
that corresponds to the first axis in the subimage, etc. Use an
axis number of 0 to create a new axis using the defaults.
- If ``0``, ``[]`` or ``None``, do a deep copy.
Coordinate axes types may be specified using either strings or
special integer constants. The available types are:
- ``'longitude'`` / ``WCSSUB_LONGITUDE``: Celestial longitude
- ``'latitude'`` / ``WCSSUB_LATITUDE``: Celestial latitude
- ``'cubeface'`` / ``WCSSUB_CUBEFACE``: Quadcube ``CUBEFACE`` axis
- ``'spectral'`` / ``WCSSUB_SPECTRAL``: Spectral axis
- ``'stokes'`` / ``WCSSUB_STOKES``: Stokes axis
- ``'celestial'`` / ``WCSSUB_CELESTIAL``: An alias for the
combination of ``'longitude'``, ``'latitude'`` and ``'cubeface'``.
Returns
-------
new_wcs : `~astropy.wcs.WCS` object
Raises
------
MemoryError
Memory allocation failed.
InvalidSubimageSpecificationError
Invalid subimage specification (no spectral axis).
NonseparableSubimageCoordinateSystem
Non-separable subimage coordinate system.
Notes
-----
Combinations of subimage axes of particular types may be extracted in
the same order as they occur in the input image by combining the
integer constants with the 'binary or' (``|``) operator. For
example::
wcs.sub([WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_SPECTRAL])
would extract the longitude, latitude, and spectral axes in the same
order as the input image. If one of each were present, the resulting
object would have three dimensions.
For convenience, ``WCSSUB_CELESTIAL`` is defined as the combination
``WCSSUB_LONGITUDE | WCSSUB_LATITUDE | WCSSUB_CUBEFACE``.
The codes may also be negated to extract all but the types specified,
for example::
wcs.sub([
WCSSUB_LONGITUDE,
WCSSUB_LATITUDE,
WCSSUB_CUBEFACE,
-(WCSSUB_SPECTRAL | WCSSUB_STOKES)])
The last of these specifies all axis types other than spectral or
Stokes. Extraction is done in the order specified by ``axes``, i.e. a
longitude axis (if present) would be extracted first (via ``axes[0]``)
and not subsequently (via ``axes[3]``). Likewise for the latitude and
cubeface axes in this example.
The number of dimensions in the returned object may be less than or
greater than the length of ``axes``. However, it will never exceed the
number of axes in the input image.
"""
tab = """
``list of Tabprm`` Tabular coordinate objects.
A list of tabular coordinate objects associated with this WCS.
"""
Tabprm = """
A class to store the information related to tabular coordinates,
i.e., coordinates that are defined via a lookup table.
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.tab`.
"""
theta0 = """
``double`` The native longitude of the fiducial point.
The point whose celestial coordinates are given in ``ref[1:2]``. If
undefined (NaN) the initialization routine, `~astropy.wcs.Wcsprm.set`,
will set this to a projection-specific default.
See also
--------
astropy.wcs.Wcsprm.phi0
"""
to_header = """
to_header(relax=False)
`to_header` translates a WCS object into a FITS header.
The details of the header depends on context:
- If the `~astropy.wcs.Wcsprm.colnum` member is non-zero then a
binary table image array header will be produced.
- Otherwise, if the `~astropy.wcs.Wcsprm.colax` member is set
non-zero then a pixel list header will be produced.
- Otherwise, a primary image or image extension header will be
produced.
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required keywords
such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or ``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will be
translated to standard (this is partially dependent on whether
``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and only if
they differ from the unit matrix. Thus, if the matrix is unity
then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`~astropy.wcs.Wcsprm.to_header` tries hard to write meaningful
comments.
8. Keyword order may be changed.
Keywords can be translated between the image array, binary table, and
pixel lists forms by manipulating the `~astropy.wcs.Wcsprm.colnum` or
`~astropy.wcs.Wcsprm.colax` members of the `~astropy.wcs.WCS`
object.
Parameters
----------
relax : bool or int
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to write.
See :ref:`relaxwrite` for details.
Returns
-------
header : str
Raw FITS header as a string.
"""
ttype = """
``str`` (read-only)
``TTYPEn`` identifying the column of the binary table that contains
the wcstab array.
"""
# Docstring for Wcsprm.unitfix.  The "Parameters" paragraph was previously
# scrambled (lines out of order); restored to a coherent sentence.
unitfix = """
unitfix(translate_units='')
Translates non-standard ``CUNITia`` keyvalues.
For example, ``DEG`` -> ``deg``, also stripping off unnecessary
whitespace.
Parameters
----------
translate_units : str, optional
Do potentially unsafe translations of non-standard unit strings.
Although ``\"S\"`` is commonly used to represent seconds, its
translation to ``\"s\"`` is potentially unsafe since the standard
recognizes ``\"S\"`` formally as Siemens, however rarely that may
be used. The same applies to ``\"H\"`` for hours (Henry),
and ``\"D\"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``\"s\"``, translate ``\"S\"`` to ``\"s\"``.
- If the string contains ``\"h\"``, translate ``\"H\"`` to ``\"h\"``.
- If the string contains ``\"d\"``, translate ``\"D\"`` to ``\"d\"``.
Thus ``''`` doesn't do any unsafe translations, whereas ``'shd'``
does all of them.
Returns
-------
success : int
Returns ``0`` for success; ``-1`` if no change required.
"""
velangl = """
``double`` Velocity angle.
The angle in degrees that should be used to decompose an observed
velocity into radial and transverse components.
An undefined value is represented by NaN.
"""
velosys = """
``double`` Relative radial velocity.
The relative radial velocity (m/s) between the observer and the
selected standard of rest in the direction of the celestial reference
coordinate, ``VELOSYSa``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.specsys, astropy.wcs.Wcsprm.ssysobs
"""
velref = """
``int`` AIPS velocity code.
From ``VELREF`` keyword.
"""
wcs = """
A `~astropy.wcs.Wcsprm` object to perform the basic `wcslib`_ WCS
transformation.
"""
Wcs = """
Wcs(*sip, cpdis, wcsprm, det2im*)
Wcs objects amalgamate basic WCS (as provided by `wcslib`_), with
`SIP`_ and `distortion paper`_ operations.
To perform all distortion corrections and WCS transformation, use
``all_pix2world``.
Parameters
----------
sip : `~astropy.wcs.Sip` object or `None`
cpdis : A pair of `~astropy.wcs.DistortionLookupTable` objects, or
``(None, None)``.
wcsprm : `~astropy.wcs.Wcsprm` object
det2im : A pair of `~astropy.wcs.DistortionLookupTable` objects, or
``(None, None)``.
"""
Wcsprm = """
Wcsprm(header=None, key=' ', relax=False, naxis=2, keysel=0, colsel=None)
`~astropy.wcs.Wcsprm` performs the core WCS transformations.
.. note::
The members of this object correspond roughly to the key/value
pairs in the FITS header. However, they are adjusted and
normalized in a number of ways that make performing the WCS
transformation easier. Therefore, they can not be relied upon to
get the original values in the header. For that, use
`astropy.io.fits.Header` directly.
The FITS header parsing enforces correct FITS "keyword = value" syntax
with regard to the equals sign occurring in columns 9 and 10.
However, it does recognize free-format character (NOST 100-2.0,
Sect. 5.2.1), integer (Sect. 5.2.3), and floating-point values
(Sect. 5.2.4) for all keywords.
Parameters
----------
header : An `astropy.io.fits.Header`, string, or `None`.
If ``None``, the object will be initialized to default values.
key : str, optional
The key referring to a particular WCS transform in the header.
This may be either ``' '`` or ``'A'``-``'Z'`` and corresponds to
the ``\"a\"`` part of ``\"CTYPEia\"``. (*key* may only be
provided if *header* is also provided.)
relax : bool or int, optional
Degree of permissiveness:
- `False`: Recognize only FITS keywords defined by the published
WCS standard.
- `True`: Admit all recognized informal extensions of the WCS
standard.
- `int`: a bit field selecting specific extensions to accept. See
:ref:`relaxread` for details.
naxis : int, optional
The number of world coordinates axes for the object. (*naxis* may
only be provided if *header* is `None`.)
keysel : sequence of flag bits, optional
Vector of flag bits that may be used to restrict the keyword types
considered:
- ``WCSHDR_IMGHEAD``: Image header keywords.
- ``WCSHDR_BIMGARR``: Binary table image array.
- ``WCSHDR_PIXLIST``: Pixel list keywords.
If zero, there is no restriction. If -1, the underlying wcslib
function ``wcspih()`` is called, rather than ``wcstbh()``.
colsel : sequence of int
A sequence of table column numbers used to restrict the keywords
considered. `None` indicates no restriction.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
"""
Wtbarr = """
Classes to construct coordinate lookup tables from a binary table
extension (BINTABLE).
This class can not be constructed directly from Python, but instead is
returned from `~astropy.wcs.Wcsprm.wtb`.
"""
zsource = """
``double`` The redshift, ``ZSOURCEa``, of the source.
An undefined value is represented by NaN.
"""
WcsError = """
Base class of all invalid WCS errors.
"""
SingularMatrix = """
SingularMatrixError()
The linear transformation matrix is singular.
"""
InconsistentAxisTypes = """
InconsistentAxisTypesError()
The WCS header inconsistent or unrecognized coordinate axis type(s).
"""
InvalidTransform = """
InvalidTransformError()
The WCS transformation is invalid, or the transformation parameters
are invalid.
"""
InvalidCoordinate = """
InvalidCoordinateError()
One or more of the world coordinates is invalid.
"""
NoSolution = """
NoSolutionError()
No solution can be found in the given interval.
"""
InvalidSubimageSpecification = """
InvalidSubimageSpecificationError()
The subimage specification is invalid.
"""
NonseparableSubimageCoordinateSystem = """
NonseparableSubimageCoordinateSystemError()
Non-separable subimage coordinate system.
"""
NoWcsKeywordsFound = """
NoWcsKeywordsFoundError()
No WCS keywords were found in the given header.
"""
InvalidTabularParameters = """
InvalidTabularParametersError()
The given tabular parameters are invalid.
"""
# Docstring for Wcsprm.mjdbeg.  The See-also previously pointed at mjdbeg
# itself (copy-paste error); it now cross-references its counterpart mjdend,
# matching the mjdavg <-> mjdobs pattern used elsewhere in this module.
mjdbeg = """
``double`` Modified Julian Date corresponding to ``DATE-BEG``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdend
"""
# Docstring for Wcsprm.mjdend.  The See-also previously pointed at mjdend
# itself (copy-paste error); it now cross-references its counterpart mjdbeg.
mjdend = """
``double`` Modified Julian Date corresponding to ``DATE-END``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.mjdbeg
"""
mjdref = """
``double`` Modified Julian Date corresponding to ``DATE-REF``.
``(MJD = JD - 2400000.5)``.
An undefined value is represented by NaN.
See also
--------
astropy.wcs.Wcsprm.dateref
"""
bepoch = """
``double`` Equivalent to ``DATE-OBS``.
Expressed as a Besselian epoch.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
jepoch = """
``double`` Equivalent to ``DATE-OBS``.
Expressed as a Julian epoch.
See also
--------
astropy.wcs.Wcsprm.dateobs
"""
# Docstring for Wcsprm.datebeg.  The See-also previously pointed at datebeg
# itself (copy-paste error); it now cross-references its counterpart dateend.
datebeg = """
``string`` Date at the start of the observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.dateend
"""
# Docstring for Wcsprm.dateend.  The See-also previously pointed at dateend
# itself (copy-paste error); it now cross-references its counterpart datebeg.
dateend = """
``string`` Date at the end of the observation.
In ISO format, ``yyyy-mm-ddThh:mm:ss``.
See also
--------
astropy.wcs.Wcsprm.datebeg
"""
dateref = """
``string`` Date of a reference epoch relative to which
other time measurements refer.
See also
--------
astropy.wcs.Wcsprm.dateref
"""
timesys = """
``string`` Time scale (UTC, TAI, etc.) in which all other time-related
auxiliary header values are recorded. Also defines the time scale for
an image axis with CTYPEia set to 'TIME'.
See also
--------
astropy.wcs.Wcsprm.timesys
"""
trefpos = """
``string`` Location in space where the recorded time is valid.
See also
--------
astropy.wcs.Wcsprm.trefpos
"""
trefdir = """
``string`` Reference direction used in calculating a pathlength delay.
See also
--------
astropy.wcs.Wcsprm.trefdir
"""
timeunit = """
``string`` Time units in which the following header values are expressed:
``TSTART``, ``TSTOP``, ``TIMEOFFS``, ``TIMSYER``, ``TIMRDER``, ``TIMEDEL``.
It also provides the default value for ``CUNITia`` for time axes.
See also
--------
astropy.wcs.Wcsprm.trefdir
"""
plephem = """
``string`` The Solar System ephemeris used for calculating a pathlength delay.
See also
--------
astropy.wcs.Wcsprm.plephem
"""
tstart = """
``double`` equivalent to DATE-BEG expressed as a time in units of TIMEUNIT relative to DATEREF+TIMEOFFS.
See also
--------
astropy.wcs.Wcsprm.tstop
"""
tstop = """
``double`` equivalent to DATE-END expressed as a time in units of TIMEUNIT relative to DATEREF+TIMEOFFS.
See also
--------
astropy.wcs.Wcsprm.tstart
"""
telapse = """
``double`` equivalent to the elapsed time between DATE-BEG and DATE-END, in units of TIMEUNIT.
See also
--------
astropy.wcs.Wcsprm.tstart
"""
timeoffs = """
``double`` Time offset, which may be used, for example, to provide a uniform clock correction
for times referenced to DATEREF.
See also
--------
astropy.wcs.Wcsprm.timeoffs
"""
timsyer = """
``double`` the absolute error of the time values, in units of TIMEUNIT.
See also
--------
astropy.wcs.Wcsprm.timrder
"""
timrder = """
``double`` the accuracy of time stamps relative to each other, in units of TIMEUNIT.
See also
--------
astropy.wcs.Wcsprm.timsyer
"""
timedel = """
``double`` the resolution of the time stamps.
See also
--------
astropy.wcs.Wcsprm.timedel
"""
timepixr = """
``double`` relative position of the time stamps in binned time intervals, a value between 0.0 and 1.0.
See also
--------
astropy.wcs.Wcsprm.timepixr
"""
obsorbit = """
``string`` URI, URL, or name of an orbit ephemeris file giving spacecraft coordinates relating to TREFPOS.
See also
--------
astropy.wcs.Wcsprm.trefpos
"""
xposure = """
``double`` effective exposure time in units of TIMEUNIT.
See also
--------
astropy.wcs.Wcsprm.timeunit
"""
# Docstring for Wcsprm.czphs.  The FITS keyword was previously misspelled
# ``CSPHSia``; corrected to ``CZPHSia`` to match the attribute name
# (cf. the ``cperi``/``CPERIia`` entry below).
czphs = """
``double array[naxis]`` The time at the zero point of a phase axis, ``CZPHSia``.
An undefined value is represented by NaN.
"""
cperi = """
``double array[naxis]`` period of a phase axis, CPERIia.
An undefined value is represented by NaN.
"""
| bsd-3-clause |
sdague/home-assistant | homeassistant/components/starline/binary_sensor.py | 19 | 1776 | """Reads vehicle status from StarLine API."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from .account import StarlineAccount, StarlineDevice
from .const import DOMAIN
from .entity import StarlineEntity
# Maps a key of ``StarlineDevice.car_state`` to the ``[name, device_class]``
# pair used when constructing the corresponding binary sensor entity below.
SENSOR_TYPES = {
    "hbrake": ["Hand Brake", DEVICE_CLASS_POWER],
    "hood": ["Hood", DEVICE_CLASS_DOOR],
    "trunk": ["Trunk", DEVICE_CLASS_DOOR],
    "alarm": ["Alarm", DEVICE_CLASS_PROBLEM],
    "door": ["Doors", DEVICE_CLASS_LOCK],
}
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up StarLine binary sensors for every device on the account."""
    account: StarlineAccount = hass.data[DOMAIN][entry.entry_id]
    # One candidate sensor per (device, known state key) pair; a device only
    # yields sensors for the SENSOR_TYPES keys present in its car_state.
    candidates = (
        StarlineSensor(account, device, key, name, device_class)
        for device in account.api.devices.values()
        for key, (name, device_class) in SENSOR_TYPES.items()
        if key in device.car_state
    )
    # Register only sensors whose backing state currently reports a value.
    async_add_entities([sensor for sensor in candidates if sensor.is_on is not None])
class StarlineSensor(StarlineEntity, BinarySensorEntity):
    """Representation of a StarLine binary sensor.

    Wraps a single boolean entry of a device's ``car_state`` mapping and
    exposes it through Home Assistant's binary-sensor interface.
    """

    def __init__(
        self,
        account: StarlineAccount,
        device: StarlineDevice,
        key: str,
        name: str,
        device_class: str,
    ):
        """Initialize sensor.

        :param account: StarLine account wrapper owning the API session.
        :param device: Device whose ``car_state`` backs this sensor.
        :param key: Key into ``device.car_state`` holding this sensor's value.
        :param name: Human-readable sensor name.
        :param device_class: One of the ``DEVICE_CLASS_*`` constants.
        """
        super().__init__(account, device, key, name)
        # Fixed at construction time; a sensor's device class never changes.
        self._device_class = device_class

    @property
    def device_class(self) -> str:
        """Return the class of the binary sensor."""
        return self._device_class

    @property
    def is_on(self):
        """Return the state of the binary sensor.

        ``None`` when the key is absent from the device's current car_state.
        """
        return self._device.car_state.get(self._key)
| apache-2.0 |
ChinaQuants/bokeh | tests/integration/webserver.py | 21 | 3888 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Taken from
# https://github.com/SeleniumHQ/selenium/blob/52e9d6407248bce5de2b6a73103a50bb0e670c1f/py/test/selenium/webdriver/common/webserver.py
# with small modifications
"""A simple web server for testing purpose.
It serves the testing html pages that are needed by the webdriver unit tests."""
import logging
import os
import socket
import threading
from io import open
try:
from urllib import request as urllib_request
except ImportError:
import urllib as urllib_request
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# Module-wide logger for the test web server.
LOGGER = logging.getLogger(__name__)

# Directory that HtmlOnlyHandler serves HTML files from (this file's directory).
HTML_ROOT = os.path.dirname(__file__)
if not os.path.isdir(HTML_ROOT):
    # BUG FIX: the original concatenated an undefined name ``WEBDRIVER`` here,
    # turning this diagnostic into a NameError. Read it from the environment
    # instead, which is what the message tells the user to set.
    message = ("Can't find 'common_web' directory, try setting WEBDRIVER"
               " environment variable WEBDRIVER:"
               + os.environ.get("WEBDRIVER", "<unset>")
               + " HTML_ROOT:" + HTML_ROOT)
    LOGGER.error(message)
    # ``assert 0`` is stripped under ``python -O``; raise explicitly so the
    # misconfiguration always aborts.
    raise RuntimeError(message)

DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8000
class HtmlOnlyHandler(BaseHTTPRequestHandler):
    """Http handler that serves files from HTML_ROOT as text/html.

    NOTE(review): the request path is joined to HTML_ROOT without
    normalization, so ``..`` segments can escape the root. Acceptable for a
    loopback-only test fixture; do not reuse for untrusted clients.
    """

    def do_GET(self):
        """GET method handler."""
        path = self.path[1:].split('?')[0]
        try:
            # Read the file *before* sending any response so a read failure
            # can still produce a clean 404. ``with`` guarantees the handle
            # is closed even on error (the original leaked it if read()
            # raised, and had already sent a 200 status by then).
            with open(os.path.join(HTML_ROOT, path), 'r',
                      encoding='latin-1') as html:
                content = html.read()
        except IOError:
            self.send_error(404, 'File Not Found: %s' % path)
            return
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(content.encode('utf-8'))

    def log_message(self, format, *args):
        """Override default to avoid trashing stderr"""
        pass
class SimpleWebServer(object):
    """A very basic web server serving HTML_ROOT on a background thread."""

    def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT):
        """Bind to the first free port at or above *port* on *host*.

        The original contained dead no-op statements (``host = host`` /
        ``port = port``), removed here.
        """
        self.stop_serving = False
        while True:
            try:
                self.server = HTTPServer(
                    (host, port), HtmlOnlyHandler)
                self.host = host
                self.port = port
                break
            except socket.error:
                # Lazy %-style args: the message is only formatted when
                # DEBUG logging is enabled.
                LOGGER.debug("port %d is in use, trying to next one", port)
                port += 1
        self.thread = threading.Thread(target=self._run_web_server)

    def _run_web_server(self):
        """Runs the server loop until stop() flips stop_serving."""
        LOGGER.debug("web server started")
        while not self.stop_serving:
            self.server.handle_request()
        self.server.server_close()

    def start(self):
        """Starts the server."""
        self.thread.start()

    def stop(self):
        """Stops the server."""
        self.stop_serving = True
        try:
            # This is to force stop the server loop: handle_request() blocks
            # until one request arrives, so issue a throwaway request.
            urllib_request.URLopener().open("http://%s:%d" % (self.host, self.port))
        except IOError:
            pass
        LOGGER.info("Shutting down the webserver")
        self.thread.join()

    def where_is(self, path):
        """Return the full URL under which *path* is served."""
        return "http://%s:%d/%s" % (self.host, self.port, path)
| bsd-3-clause |
nathanielvarona/airflow | airflow/contrib/operators/file_to_gcs.py | 3 | 1728 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.google.cloud.transfers.local_to_gcs`."""
import warnings
from airflow.providers.google.cloud.transfers.local_to_gcs import LocalFilesystemToGCSOperator
# Emit the deprecation notice once at import time; stacklevel=2 attributes
# the warning to the module that performed the import rather than this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.transfers.local_to_gcs`,",
    DeprecationWarning,
    stacklevel=2,
)
class FileToGoogleCloudStorageOperator(LocalFilesystemToGCSOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.transfers.local_to_gcs.LocalFilesystemToGCSOperator`.
    """

    def __init__(self, *args, **kwargs):
        # Warn the caller, then delegate construction to the provider class.
        deprecation_message = """This class is deprecated.
            Please use
            `airflow.providers.google.cloud.transfers.local_to_gcs.LocalFilesystemToGCSOperator`."""
        warnings.warn(deprecation_message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
| apache-2.0 |
girving/tensorflow | tensorflow/python/debug/lib/debug_gradients_test.py | 30 | 15730 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for debug_gradients module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_gradients
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class IdentifyGradientTest(test_util.TensorFlowTestCase):
  """Tests for GradientsDebugger gradient identification and watching.

  setUp builds the graph ``w = u * v`` in a session with graph rewrites
  disabled; each test then checks that gradient tensors registered through
  debug_gradients.GradientsDebugger can be fetched by x-tensor object or by
  x-tensor name, and that their values match the analytic gradients.
  """

  def setUp(self):
    """Create a rewrite-free session and the small u, v, w = u*v graph."""
    # Disable pruning/dependency optimization so the debug ops inserted by
    # the gradients debugger are not optimized away.
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
    config = config_pb2.ConfigProto(graph_options=graph_options)
    self.sess = session.Session(config=config)
    with self.sess.as_default():
      self.u = variables.Variable(2.0, name="u")
      self.v = variables.Variable(3.0, name="v")
      self.w = math_ops.multiply(self.u.value(), self.v.value(), name="w")

  def tearDown(self):
    """Reset the default graph and clear registered gradient debuggers."""
    ops.reset_default_graph()
    debug_gradients.clear_gradient_debuggers()

  def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):
    grad_debugger = debug_gradients.GradientsDebugger()
    id_grad_w = grad_debugger.identify_gradient(self.w)
    y = math_ops.add(id_grad_w, -1.0, name="y")

    grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]
    v_grad = grads[1]

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(5.0, self.sess.run(y))
    self.assertAllClose(3.0, self.sess.run(u_grad))
    self.assertAllClose(2.0, self.sess.run(v_grad))

    # Fetch the gradient tensor with the x-tensor object.
    w_grad = grad_debugger.gradient_tensor(self.w)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

    # Fetch the gradient tensor with the x-tensor's name.
    w_grad = grad_debugger.gradient_tensor(self.w.name)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

    # Fetch the gradient tensor with the x-tensor name.
    w_grad = grad_debugger.gradient_tensor(self.w.name)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

  def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
    grad_debugger = debug_gradients.GradientsDebugger()
    id_grad_w = grad_debugger.identify_gradient(self.w)
    y = math_ops.add(id_grad_w, -1.0, name="y")

    with grad_debugger:
      grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]
    v_grad = grads[1]

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(5.0, self.sess.run(y))
    self.assertAllClose(3.0, self.sess.run(u_grad))
    self.assertAllClose(2.0, self.sess.run(v_grad))

    # Fetch the gradient tensor with the x-tensor object.
    w_grad = grad_debugger.gradient_tensor(self.w)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

    # Fetch the gradient tensor with the x-tensor's name.
    w_grad = grad_debugger.gradient_tensor(self.w.name)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

    # Fetch the gradient tensor with the x-tensor name.
    w_grad = grad_debugger.gradient_tensor(self.w.name)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

  def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
    grad_debugger = debug_gradients.GradientsDebugger()
    grad_debugger.identify_gradient(self.w)
    with self.assertRaisesRegexp(ValueError,
                                 "The graph already contains an op named .*"):
      grad_debugger.identify_gradient(self.w)

  def testIdentifyGradientWorksOnMultipleLosses(self):
    grad_debugger_1 = debug_gradients.GradientsDebugger()
    grad_debugger_2 = debug_gradients.GradientsDebugger()

    y = math_ops.add(self.w, -1.0, name="y")
    debug_y = grad_debugger_1.identify_gradient(y)
    z1 = math_ops.square(debug_y, name="z1")

    debug_y = grad_debugger_2.identify_gradient(y)
    z2 = math_ops.sqrt(debug_y, name="z2")

    with grad_debugger_1:
      gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
    with grad_debugger_2:
      gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)

    dz1_dy = grad_debugger_1.gradient_tensor(y)
    dz2_dy = grad_debugger_2.gradient_tensor(y)
    self.assertIsInstance(dz1_dy, ops.Tensor)
    self.assertIsInstance(dz2_dy, ops.Tensor)
    self.assertIsNot(dz1_dy, dz2_dy)

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(5.0**2, self.sess.run(z1))
    self.assertAllClose(5.0**0.5, self.sess.run(z2))
    self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
    self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))

  def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
    grad_debugger_1 = debug_gradients.GradientsDebugger()
    grad_debugger_2 = debug_gradients.GradientsDebugger()
    id_grad_w = grad_debugger_1.identify_gradient(self.w)
    y = math_ops.add(id_grad_w, -1.0, name="y")

    # There are >1 gradient debuggers registered, and grad_debugger is not used
    # as a context manager here, so the gradient w.r.t. self.w will not be
    # registered.
    gradients_impl.gradients(y, [self.u, self.v])

    with self.assertRaisesRegexp(
        LookupError,
        r"This GradientsDebugger has not received any gradient tensor for "):
      grad_debugger_1.gradient_tensor(self.w)
    with self.assertRaisesRegexp(
        LookupError,
        r"This GradientsDebugger has not received any gradient tensor for "):
      grad_debugger_2.gradient_tensor(self.w)

  def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
    grad_debugger = debug_gradients.GradientsDebugger()
    with self.assertRaisesRegexp(
        TypeError,
        r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
        r"has type .*Operation.*"):
      grad_debugger.gradient_tensor(variables.global_variables_initializer())

  def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
    grad_debugger = debug_gradients.GradientsDebugger()
    id_grad_w = grad_debugger.identify_gradient(self.w)
    y = math_ops.add(id_grad_w, -1.0, name="y")

    with grad_debugger:
      gradient_descent.GradientDescentOptimizer(0.1).minimize(y)

    self.sess.run(variables.global_variables_initializer())

    # Fetch the gradient tensor with the x-tensor object.
    w_grad = grad_debugger.gradient_tensor(self.w)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

  def testWatchGradientsByXTensorNamesWorks(self):
    y = math_ops.add(self.w, -1.0, name="y")

    # The constructrion of the forward graph has completed.
    # But we can still get the gradient tensors by using
    # watch_gradients_by_tensor_names().
    grad_debugger = debug_gradients.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$"):
      grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]
    v_grad = grads[1]

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(5.0, self.sess.run(y))
    self.assertAllClose(3.0, self.sess.run(u_grad))
    self.assertAllClose(2.0, self.sess.run(v_grad))

    w_grad = grad_debugger.gradient_tensor(self.w)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

    w_grad = grad_debugger.gradient_tensor("w:0")
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

  def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
    y = math_ops.add(self.w, -1.0, name="y")

    # The constructrion of the forward graph has completed.
    # But we can still get the gradient tensors by using
    # watch_gradients_by_tensor_names().
    grad_debugger = debug_gradients.GradientsDebugger()
    grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$")
    grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]
    v_grad = grads[1]

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(5.0, self.sess.run(y))
    self.assertAllClose(3.0, self.sess.run(u_grad))
    self.assertAllClose(2.0, self.sess.run(v_grad))

    w_grad = grad_debugger.gradient_tensor(self.w)
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

    w_grad = grad_debugger.gradient_tensor("w:0")
    self.assertIsInstance(w_grad, ops.Tensor)
    self.assertAllClose(1.0, self.sess.run(w_grad))

  def testWatchGradientsWorksOnRefTensor(self):
    y = math_ops.add(self.w, -1.0, name="y")

    grad_debugger = debug_gradients.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"):
      grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]
    v_grad = grads[1]

    self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(3.0, self.sess.run(u_grad))
    self.assertAllClose(2.0, self.sess.run(v_grad))
    self.assertAllClose(3.0, self.sess.run(
        grad_debugger.gradient_tensor("u:0")))

  def testWatchGradientsWorksOnMultipleTensors(self):
    y = math_ops.add(self.w, -1.0, name="y")

    grad_debugger = debug_gradients.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph,
                                                       "(u|w):0$"):
      grads = gradients_impl.gradients(y, [self.u, self.v])
    self.assertEqual(2, len(grads))
    u_grad = grads[0]

    self.assertEqual(2, len(grad_debugger.gradient_tensors()))
    self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
    self.assertIsInstance(grad_debugger.gradient_tensor("w:0"), ops.Tensor)

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(1.0, self.sess.run(
        grad_debugger.gradient_tensor("w:0")))
    self.assertAllClose(3.0, self.sess.run(
        grad_debugger.gradient_tensor("u:0")))

  def testWatchGradientsByXTensorsWorks(self):
    y = math_ops.add(self.w, -1.0, name="foo/y")
    z = math_ops.square(y, name="foo/z")

    # The constructrion of the forward graph has completed.
    # But we can still get the gradient tensors by using
    # watch_gradients_by_x_tensors().
    grad_debugger = debug_gradients.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
                                                  [self.w, self.u, y]):
      gradient_descent.GradientDescentOptimizer(0.1).minimize(z)

    self.assertEqual(3, len(grad_debugger.gradient_tensors()))
    u_grad = grad_debugger.gradient_tensor(self.u)
    w_grad = grad_debugger.gradient_tensor(self.w)
    y_grad = grad_debugger.gradient_tensor(y)

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(10.0, self.sess.run(y_grad))
    self.assertAllClose(10.0, self.sess.run(w_grad))
    self.assertAllClose(30.0, self.sess.run(u_grad))

  def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
    y = math_ops.add(self.w, -1.0, name="y")
    z1 = math_ops.square(y, name="z1")
    z2 = math_ops.sqrt(y, name="z2")

    grad_debugger_1 = debug_gradients.GradientsDebugger()
    with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):
      gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)

    grad_debugger_2 = debug_gradients.GradientsDebugger()
    with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):
      gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)

    dz1_dy = grad_debugger_1.gradient_tensor(y)
    dz2_dy = grad_debugger_2.gradient_tensor(y)
    self.assertIsInstance(dz1_dy, ops.Tensor)
    self.assertIsInstance(dz2_dy, ops.Tensor)
    self.assertIsNot(dz1_dy, dz2_dy)

    self.sess.run(variables.global_variables_initializer())
    self.assertAllClose(5.0**2, self.sess.run(z1))
    self.assertAllClose(5.0**0.5, self.sess.run(z2))
    self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
    self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))

  def testGradientsValuesFromDumpWorks(self):
    y = math_ops.add(self.w, -1.0, name="y")
    z = math_ops.square(y, name="z")

    grad_debugger = debug_gradients.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensors(self.sess.graph,
                                                  [self.w, self.u, y]):
      train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z)

    self.sess.run(variables.global_variables_initializer())

    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    dump_dir = tempfile.mkdtemp()
    debug_url = "file://" + dump_dir
    debug_utils.watch_graph(run_options, self.sess.graph, debug_urls=debug_url)
    run_metadata = config_pb2.RunMetadata()
    self.assertAllClose(2.0, self.sess.run(self.u))
    self.sess.run(train_op, options=run_options, run_metadata=run_metadata)
    self.assertAllClose(-1.0, self.sess.run(self.u))

    dump = debug_data.DebugDumpDir(
        dump_dir, partition_graphs=run_metadata.partition_graphs)
    dump.set_python_graph(self.sess.graph)

    y_grad_values = debug_gradients.gradient_values_from_dump(
        grad_debugger, y, dump)
    self.assertEqual(1, len(y_grad_values))
    self.assertAllClose(10.0, y_grad_values[0])

    w_grad_values = debug_gradients.gradient_values_from_dump(
        grad_debugger, self.w, dump)
    self.assertEqual(1, len(w_grad_values))
    self.assertAllClose(10.0, w_grad_values[0])

    u_grad_values = debug_gradients.gradient_values_from_dump(
        grad_debugger, self.u, dump)
    self.assertEqual(1, len(u_grad_values))
    self.assertAllClose(30.0, u_grad_values[0])

    with self.assertRaisesRegexp(
        LookupError,
        r"This GradientsDebugger has not received any gradient tensor for "
        r"x-tensor v:0"):
      debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump)

    # Cleanup.
    shutil.rmtree(dump_dir)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
mammique/django | tests/regressiontests/defer_regress/tests.py | 25 | 10261 | from __future__ import absolute_import
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count
from django.db.models.loading import cache
from django.test import TestCase
from .models import (ResolveThis, Item, RelatedItem, Child, Leaf, Proxy,
SimpleItem, Feature, ItemAndSimpleItem, OneToOneItem, SpecialFeature)
class DeferRegressionTest(TestCase):
    """Regression tests for QuerySet.defer()/only() (tickets noted inline)."""

    def test_basic(self):
        """Core defer()/only() behavior: query counts, sharing, related objects."""
        # Deferred fields should really be deferred and not accidentally use
        # the field's default value just because they aren't passed to __init__
        Item.objects.create(name="first", value=42)
        obj = Item.objects.only("name", "other_value").get(name="first")
        # Accessing "name" doesn't trigger a new database query. Accessing
        # "value" or "text" should.
        with self.assertNumQueries(0):
            self.assertEqual(obj.name, "first")
            self.assertEqual(obj.other_value, 0)
        with self.assertNumQueries(1):
            self.assertEqual(obj.value, 42)
        with self.assertNumQueries(1):
            self.assertEqual(obj.text, "xyzzy")
        with self.assertNumQueries(0):
            self.assertEqual(obj.text, "xyzzy")

        # Regression test for #10695. Make sure different instances don't
        # inadvertently share data in the deferred descriptor objects.
        i = Item.objects.create(name="no I'm first", value=37)
        items = Item.objects.only("value").order_by("-value")
        self.assertEqual(items[0].name, "first")
        self.assertEqual(items[1].name, "no I'm first")

        RelatedItem.objects.create(item=i)
        r = RelatedItem.objects.defer("item").get()
        self.assertEqual(r.item_id, i.id)
        self.assertEqual(r.item, i)

        # Some further checks for select_related() and inherited model
        # behavior (regression for #10710).
        c1 = Child.objects.create(name="c1", value=42)
        c2 = Child.objects.create(name="c2", value=37)
        Leaf.objects.create(name="l1", child=c1, second_child=c2)

        obj = Leaf.objects.only("name", "child").select_related()[0]
        self.assertEqual(obj.child.name, "c1")
        self.assertQuerysetEqual(
            Leaf.objects.select_related().only("child__name", "second_child__name"), [
                "l1",
            ],
            attrgetter("name")
        )

        # Models instances with deferred fields should still return the same
        # content types as their non-deferred versions (bug #10738).
        ctype = ContentType.objects.get_for_model
        c1 = ctype(Item.objects.all()[0])
        c2 = ctype(Item.objects.defer("name")[0])
        c3 = ctype(Item.objects.only("name")[0])
        self.assertTrue(c1 is c2 is c3)

        # Regression for #10733 - only() can be used on a model with two
        # foreign keys.
        results = Leaf.objects.only("name", "child", "second_child").select_related()
        self.assertEqual(results[0].child.name, "c1")
        self.assertEqual(results[0].second_child.name, "c2")

        results = Leaf.objects.only("name", "child", "second_child", "child__name", "second_child__name").select_related()
        self.assertEqual(results[0].child.name, "c1")
        self.assertEqual(results[0].second_child.name, "c2")

        # Test for #12163 - Pickling error saving session with unsaved model
        # instances.
        SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'

        item = Item()
        item._deferred = False
        s = SessionStore(SESSION_KEY)
        s.clear()
        s["item"] = item
        s.save()

        s = SessionStore(SESSION_KEY)
        s.modified = True
        s.save()

        i2 = s["item"]
        self.assertFalse(i2._deferred)

        # Regression for #11936 - loading.get_models should not return deferred
        # models by default.
        klasses = sorted(
            cache.get_models(cache.get_app("defer_regress")),
            key=lambda klass: klass.__name__
        )
        self.assertEqual(
            klasses, [
                Child,
                Feature,
                Item,
                ItemAndSimpleItem,
                Leaf,
                OneToOneItem,
                Proxy,
                RelatedItem,
                ResolveThis,
                SimpleItem,
                SpecialFeature,
            ]
        )

        klasses = sorted(
            map(
                attrgetter("__name__"),
                cache.get_models(
                    cache.get_app("defer_regress"), include_deferred=True
                ),
            )
        )
        # FIXME: This is dependent on the order in which tests are run --
        # this test case has to be the first, otherwise a LOT more classes
        # appear.
        self.assertEqual(
            klasses, [
                "Child",
                "Child_Deferred_value",
                "Feature",
                "Item",
                "ItemAndSimpleItem",
                "Item_Deferred_name",
                "Item_Deferred_name_other_value_text",
                "Item_Deferred_name_other_value_value",
                "Item_Deferred_other_value_text_value",
                "Item_Deferred_text_value",
                "Leaf",
                "Leaf_Deferred_child_id_second_child_id_value",
                "Leaf_Deferred_name_value",
                "Leaf_Deferred_second_child_id_value",
                "Leaf_Deferred_value",
                "OneToOneItem",
                "Proxy",
                "RelatedItem",
                "RelatedItem_Deferred_",
                "RelatedItem_Deferred_item_id",
                "ResolveThis",
                "SimpleItem",
                "SpecialFeature",
            ]
        )

        # Regression for #16409 - make sure defer() and only() work with annotate()
        self.assertIsInstance(list(SimpleItem.objects.annotate(Count('feature')).defer('name')), list)
        self.assertIsInstance(list(SimpleItem.objects.annotate(Count('feature')).only('name')), list)

    def test_only_and_defer_usage_on_proxy_models(self):
        """Regression for #15790 - only()/defer() on proxy models."""
        # Regression for #15790 - only() broken for proxy models
        proxy = Proxy.objects.create(name="proxy", value=42)

        msg = 'QuerySet.only() return bogus results with proxy models'
        dp = Proxy.objects.only('other_value').get(pk=proxy.pk)
        self.assertEqual(dp.name, proxy.name, msg=msg)
        self.assertEqual(dp.value, proxy.value, msg=msg)

        # also test things with .defer()
        msg = 'QuerySet.defer() return bogus results with proxy models'
        dp = Proxy.objects.defer('name', 'text', 'value').get(pk=proxy.pk)
        self.assertEqual(dp.name, proxy.name, msg=msg)
        self.assertEqual(dp.value, proxy.value, msg=msg)

    def test_resolve_columns(self):
        """defer() on a model whose field needs custom column resolution."""
        rt = ResolveThis.objects.create(num=5.0, name='Foobar')
        qs = ResolveThis.objects.defer('num')
        self.assertEqual(1, qs.count())
        self.assertEqual('Foobar', qs[0].name)

    def test_reverse_one_to_one_relations(self):
        """Refs #14694: defer/only across a reverse one-to-one relation."""
        # Refs #14694. Test reverse relations which are known unique (reverse
        # side has o2ofield or unique FK) - the o2o case
        item = Item.objects.create(name="first", value=42)
        o2o = OneToOneItem.objects.create(item=item, name="second")
        self.assertEqual(len(Item.objects.defer('one_to_one_item__name')), 1)
        self.assertEqual(len(Item.objects.select_related('one_to_one_item')), 1)
        self.assertEqual(len(Item.objects.select_related(
            'one_to_one_item').defer('one_to_one_item__name')), 1)
        self.assertEqual(len(Item.objects.select_related('one_to_one_item').defer('value')), 1)
        # Make sure that `only()` doesn't break when we pass in a unique relation,
        # rather than a field on the relation.
        self.assertEqual(len(Item.objects.only('one_to_one_item')), 1)
        with self.assertNumQueries(1):
            i = Item.objects.select_related('one_to_one_item')[0]
            self.assertEqual(i.one_to_one_item.pk, o2o.pk)
            self.assertEqual(i.one_to_one_item.name, "second")
        with self.assertNumQueries(1):
            i = Item.objects.select_related('one_to_one_item').defer(
                'value', 'one_to_one_item__name')[0]
            self.assertEqual(i.one_to_one_item.pk, o2o.pk)
            self.assertEqual(i.name, "first")
        with self.assertNumQueries(1):
            self.assertEqual(i.one_to_one_item.name, "second")
        with self.assertNumQueries(1):
            self.assertEqual(i.value, 42)

    def test_defer_with_select_related(self):
        """defer() of an FK combined with select_related() of another relation."""
        item1 = Item.objects.create(name="first", value=47)
        item2 = Item.objects.create(name="second", value=42)
        simple = SimpleItem.objects.create(name="simple", value="23")
        related = ItemAndSimpleItem.objects.create(item=item1, simple=simple)

        obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
        self.assertEqual(obj.item, item1)
        self.assertEqual(obj.item_id, item1.id)

        obj.item = item2
        obj.save()

        obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
        self.assertEqual(obj.item, item2)
        self.assertEqual(obj.item_id, item2.id)

    def test_only_with_select_related(self):
        """Test for #17485: only() with select_related() across relations."""
        # Test for #17485.
        item = SimpleItem.objects.create(name='first', value=47)
        feature = Feature.objects.create(item=item)
        SpecialFeature.objects.create(feature=feature)

        qs = Feature.objects.only('item__name').select_related('item')
        self.assertEqual(len(qs), 1)

        qs = SpecialFeature.objects.only('feature__item__name').select_related('feature__item')
        self.assertEqual(len(qs), 1)

    def test_deferred_class_factory(self):
        """Deferred class names are truncated/hashed to stay within limits."""
        from django.db.models.query_utils import deferred_class_factory
        new_class = deferred_class_factory(Item,
            ('this_is_some_very_long_attribute_name_so_modelname_truncation_is_triggered',))
        self.assertEqual(new_class.__name__,
            'Item_Deferred_this_is_some_very_long_attribute_nac34b1f495507dad6b02e2cb235c875e')
| bsd-3-clause |
nowls/gnuradio | gr-analog/python/analog/qa_pll_carriertracking.py | 40 | 6967 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, gr_unittest, analog, blocks
class test_pll_carriertracking(gr_unittest.TestCase):
    """QA test for analog.pll_carriertracking_cc.

    Feeds a pure complex tone through the PLL carrier-tracking block and
    compares one cycle of output against golden values (the PLL pulling in
    and settling on the carrier).
    """

    def setUp (self):
        """Create a fresh top block for each test."""
        self.tb = gr.top_block()

    def tearDown (self):
        """Release the top block."""
        self.tb = None

    def test_pll_carriertracking(self):
        """Compare PLL output against the recorded golden acquisition curve."""
        # Golden output: one carrier period of the PLL locking onto the tone.
        expected_result = ((1.000002384185791+7.219194575469601e-09j),
                           (0.9980257153511047+0.06279045343399048j),
                           (0.992796003818512+0.11979719996452332j),
                           (0.9852395057678223+0.17117266356945038j),
                           (0.9761406779289246+0.2171468883752823j),
                           (0.9661445617675781+0.25799843668937683j),
                           (0.9557913541793823+0.29403796792030334j),
                           (0.9455097317695618+0.3255884349346161j),
                           (0.935634434223175+0.35297322273254395j),
                           (0.9264140129089355+0.37650591135025024j),
                           (0.918036699295044+0.3964899182319641j),
                           (0.9106329679489136+0.4132115840911865j),
                           (0.9042812585830688+0.42693787813186646j),
                           (0.899017333984375+0.4379141628742218j),
                           (0.89484703540802+0.4463684558868408j),
                           (0.891755223274231+0.45251286029815674j),
                           (0.8897027969360352+0.4565400779247284j),
                           (0.8886303901672363+0.45862627029418945j),
                           (0.8884686827659607+0.4589335024356842j),
                           (0.8891477584838867+0.4576151967048645j),
                           (0.8905870318412781+0.4548112750053406j),
                           (0.8927018642425537+0.4506511092185974j),
                           (0.8954030275344849+0.4452534019947052j),
                           (0.898613452911377+0.43873584270477295j),
                           (0.9022520780563354+0.4312065541744232j),
                           (0.9062415361404419+0.42276597023010254j),
                           (0.9104995131492615+0.4135076403617859j),
                           (0.9149653315544128+0.4035266935825348j),
                           (0.9195748567581177+0.3929111361503601j),
                           (0.9242699146270752+0.3817441761493683j),
                           (0.9289909601211548+0.37010061740875244j),
                           (0.9336962103843689+0.3580598831176758j),
                           (0.9383456707000732+0.3456934690475464j),
                           (0.9429033994674683+0.3330692648887634j),
                           (0.9473329186439514+0.3202497363090515j),
                           (0.9516113996505737+0.3072968125343323j),
                           (0.9557210206985474+0.2942683696746826j),
                           (0.9596443772315979+0.2812172472476959j),
                           (0.963365912437439+0.2681918740272522j),
                           (0.9668760299682617+0.2552390694618225j),
                           (0.9701738357543945+0.24240154027938843j),
                           (0.9732568264007568+0.22971850633621216j),
                           (0.9761228561401367+0.21722495555877686j),
                           (0.9787704944610596+0.20495179295539856j),
                           (0.9812103509902954+0.1929289996623993j),
                           (0.98344886302948+0.18118229508399963j),
                           (0.9854917526245117+0.1697331666946411j),
                           (0.9873413443565369+0.1586003601551056j),
                           (0.989014744758606+0.147801473736763j),
                           (0.9905213713645935+0.1373506784439087j),
                           (0.9918720126152039+0.12725868821144104j),
                           (0.9930678606033325+0.1175333634018898j),
                           (0.9941287040710449+0.10818269848823547j),
                           (0.9950648546218872+0.0992119163274765j),
                           (0.995887041091919+0.09062285721302032j),
                           (0.9965973496437073+0.08241605758666992j),
                           (0.9972119927406311+0.07459107041358948j),
                           (0.997741162776947+0.06714606285095215j),
                           (0.9981945753097534+0.06007742881774902j),
                           (0.9985741376876831+0.05337977409362793j),
                           (0.9988903999328613+0.04704824090003967j),
                           (0.9991542100906372+0.04107558727264404j),
                           (0.9993717074394226+0.03545379638671875j),
                           (0.9995449185371399+0.03017553687095642j),
                           (0.9996798634529114+0.025230854749679565j),
                           (0.999785304069519+0.02061113715171814j),
                           (0.9998669624328613+0.01630493998527527j),
                           (0.9999253749847412+0.012303531169891357j),
                           (0.999961256980896+0.008596181869506836j),
                           (0.9999842047691345+0.005170613527297974j),
                           (0.9999972581863403+0.0020167529582977295j),
                           (1.0000011920928955-0.0008766651153564453j),
                           (0.9999923706054688-0.0035211145877838135j),
                           (0.999980092048645-0.00592736154794693j),
                           (0.9999660849571228-0.008106544613838196j),
                           (0.9999516606330872-0.010069712996482849j),
                           (0.9999289512634277-0.011828280985355377j),
                           (0.9999079704284668-0.013392657041549683j),
                           (0.9998894333839417-0.01477348804473877j),
                           (0.9998739957809448-0.015980780124664307j),
                           (0.9998545050621033-0.017024904489517212j),
                           (0.9998371601104736-0.017916440963745117j),
                           (0.9998237490653992-0.01866436004638672j),
                           (0.999815046787262-0.01927858591079712j),
                           (0.9998044967651367-0.019767403602600098j),
                           (0.9997949600219727-0.020140081644058228j),
                           (0.9997900128364563-0.020405471324920654j),
                           (0.9997888207435608-0.020570307970046997j),
                           (0.9997872114181519-0.020643681287765503j),
                           (0.9997851848602295-0.020633310079574585j),
                           (0.9997866153717041-0.020545780658721924j),
                           (0.9997920989990234-0.020388543605804443j),
                           (0.9997975826263428-0.02016708254814148j),
                           (0.9998003840446472-0.019888341426849365j),
                           (0.99980628490448-0.019558459520339966j),
                           (0.9998152256011963-0.019182950258255005j),
                           (0.9998254179954529-0.01876668632030487j),
                           (0.9998309016227722-0.01831553876399994j),
                           (0.999838650226593-0.017833217978477478j),
                           (0.9998488426208496-0.017324130982160568j))

        sampling_freq = 10e3
        freq = sampling_freq / 100

        # Loop bandwidth and frequency limits (normalized) for the PLL.
        loop_bw = math.pi/100.0
        maxf = 1
        minf = -1

        src = analog.sig_source_c(sampling_freq, analog.GR_COS_WAVE, freq, 1.0)
        pll = analog.pll_carriertracking_cc(loop_bw, maxf, minf)
        head = blocks.head(gr.sizeof_gr_complex, int (freq))
        dst = blocks.vector_sink_c()

        self.tb.connect(src, pll, head)
        self.tb.connect(head, dst)

        self.tb.run()
        dst_data = dst.data()

        # 5 decimal places of agreement against the golden values.
        self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 5)
# Run the QA test (and emit the XML report) when executed directly.
if __name__ == '__main__':
    gr_unittest.run(test_pll_carriertracking, "test_pll_carriertracking.xml")
| gpl-3.0 |
Innovahn/odoo.old | openerp/addons/base/__init__.py | 379 | 1134 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import workflow
import module
import res
import report
import tests
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andreparrish/python-for-android | python3-alpha/python3-src/Lib/test/test_pkg.py | 55 | 9399 | # Test packages (dotted-name import)
import sys
import os
import tempfile
import textwrap
import unittest
from test import support
# Helpers to create and destroy hierarchies.
def cleanout(root):
    """Recursively delete the directory tree rooted at *root*.

    Directories are descended into (symlinked directories are removed as
    links, not followed); everything else is unlinked, then *root* itself
    is removed.
    """
    for entry in os.listdir(root):
        path = os.path.join(root, entry)
        if os.path.isdir(path) and not os.path.islink(path):
            cleanout(path)
        else:
            os.remove(path)
    os.rmdir(root)
def fixdir(lst):
    """Drop the '__builtins__' entry from *lst* (in place) and return it.

    Makes dir() output comparable regardless of whether the interpreter
    injected __builtins__ into the namespace.
    """
    try:
        lst.remove("__builtins__")
    except ValueError:
        pass  # not present -- nothing to strip
    return lst
# XXX Things to test
#
# import package without __init__
# import package with __init__
# __init__ importing submodule
# __init__ importing global module
# __init__ defining variables
# submodule importing other submodule
# submodule importing global module
# submodule import submodule via global name
# from package import submodule
# from package import subpackage
# from package import variable (defined in __init__)
# from package import * (defined in __init__)
class TestPkg(unittest.TestCase):
    """Exercise dotted-name (package) imports against small package
    hierarchies built on the fly in a temporary directory.

    Each test creates its own tree via mkhier(), which also prepends the
    tree's root to sys.path; setUp/tearDown snapshot and restore sys.path
    and sys.modules so the tests are hermetic.
    """

    def setUp(self):
        self.root = None
        self.pkgname = None
        self.syspath = list(sys.path)
        self.modules_before = support.modules_setup()

    def tearDown(self):
        # Restore interpreter state, then remove the temporary tree.
        sys.path[:] = self.syspath
        support.modules_cleanup(*self.modules_before)
        if self.root: # Only clean if the test was actually run
            cleanout(self.root)

        # delete all modules concerning the tested hierarchy
        if self.pkgname:
            modules = [name for name in sys.modules
                       if self.pkgname in name.split('.')]
            for name in modules:
                del sys.modules[name]

    def run_code(self, code):
        """Exec *code* (dedented) with this TestCase reachable as 'self'.

        Needed because 'from pkg import *' is illegal inside a function
        body, so the star-import tests must run in an exec'd namespace.
        """
        exec(textwrap.dedent(code), globals(), {"self": self})

    def mkhier(self, descr):
        """Create a package hierarchy from *descr*.

        *descr* is a sequence of (name, contents) pairs where *name* is a
        space-separated path relative to a fresh temp root; contents of
        None means "make a directory", anything else is written as a
        source file.  The root is pushed onto sys.path for import tests.
        """
        root = tempfile.mkdtemp()
        sys.path.insert(0, root)
        if not os.path.isdir(root):
            os.mkdir(root)
        for name, contents in descr:
            comps = name.split()
            fullname = root
            for c in comps:
                fullname = os.path.join(fullname, c)
            if contents is None:
                os.mkdir(fullname)
            else:
                # Context manager guarantees the handle is closed even if
                # a write raises (the original leaked it on error).
                with open(fullname, "w") as f:
                    f.write(contents)
                    if contents and contents[-1] != '\n':
                        f.write('\n')
        self.root = root
        # package name is the name of the first item
        self.pkgname = descr[0][0]

    def test_1(self):
        """Import a package whose __init__.py is empty."""
        hier = [("t1", None), ("t1 __init__.py", "")]
        self.mkhier(hier)
        import t1

    def test_2(self):
        """Nested subpackages, submodule __name__s, and 'import *'."""
        hier = [
            ("t2", None),
            ("t2 __init__.py", "'doc for t2'"),
            ("t2 sub", None),
            ("t2 sub __init__.py", ""),
            ("t2 sub subsub", None),
            ("t2 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)

        import t2.sub
        import t2.sub.subsub
        self.assertEqual(t2.__name__, "t2")
        self.assertEqual(t2.sub.__name__, "t2.sub")
        self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")

        # This exec crap is needed because Py3k forbids 'import *' outside
        # of module-scope and __import__() is insufficient for what we need.
        s = """
            import t2
            from t2 import *
            self.assertEqual(dir(), ['self', 'sub', 't2'])
            """
        self.run_code(s)

        from t2 import sub
        from t2.sub import subsub
        from t2.sub.subsub import spam
        self.assertEqual(sub.__name__, "t2.sub")
        self.assertEqual(subsub.__name__, "t2.sub.subsub")
        self.assertEqual(sub.subsub.__name__, "t2.sub.subsub")
        for name in ['spam', 'sub', 'subsub', 't2']:
            # Bug fix: this used locals()["name"], which fetched the loop
            # variable itself (always a truthy string), making the check
            # vacuous.  Look up the imported binding instead.
            self.assertTrue(locals()[name], "Failed to import %s" % name)

        import t2.sub
        import t2.sub.subsub
        self.assertEqual(t2.__name__, "t2")
        self.assertEqual(t2.sub.__name__, "t2.sub")
        self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")

        # Bug fix: this used assertTrue(dir(), [...]) -- the list was only
        # the failure *message*, so nothing was compared.  Use assertEqual
        # as the parallel check above already does.
        s = """
            from t2 import *
            self.assertEqual(dir(), ['self', 'sub'])
            """
        self.run_code(s)

    def test_3(self):
        """Importing pkg.sub.subsub binds every intermediate package."""
        hier = [
            ("t3", None),
            ("t3 __init__.py", ""),
            ("t3 sub", None),
            ("t3 sub __init__.py", ""),
            ("t3 sub subsub", None),
            ("t3 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)

        import t3.sub.subsub
        self.assertEqual(t3.__name__, "t3")
        self.assertEqual(t3.sub.__name__, "t3.sub")
        self.assertEqual(t3.sub.subsub.__name__, "t3.sub.subsub")

    def test_4(self):
        """A package directory shadows a same-named sibling module."""
        hier = [
        ("t4.py", "raise RuntimeError('Shouldnt load t4.py')"),
        ("t4", None),
        ("t4 __init__.py", ""),
        ("t4 sub.py", "raise RuntimeError('Shouldnt load sub.py')"),
        ("t4 sub", None),
        ("t4 sub __init__.py", ""),
        ("t4 sub subsub.py",
         "raise RuntimeError('Shouldnt load subsub.py')"),
        ("t4 sub subsub", None),
        ("t4 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)

        s = """
            from t4.sub.subsub import *
            self.assertEqual(spam, 1)
            """
        self.run_code(s)

    def test_5(self):
        """Relative import inside a package; local 'string' module does not
        shadow on 'import *'."""
        hier = [
        ("t5", None),
        ("t5 __init__.py", "import t5.foo"),
        ("t5 string.py", "spam = 1"),
        ("t5 foo.py",
         "from . import string; assert string.spam == 1"),
         ]
        self.mkhier(hier)

        import t5
        s = """
            from t5 import *
            self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
            """
        self.run_code(s)

        import t5
        self.assertEqual(fixdir(dir(t5)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__', 'foo', 'string', 't5'])
        self.assertEqual(fixdir(dir(t5.foo)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', 'string'])
        self.assertEqual(fixdir(dir(t5.string)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', 'spam'])

    def test_6(self):
        """__all__ controls what 'import *' pulls in (and lazily imports
        the listed submodules)."""
        hier = [
                ("t6", None),
                ("t6 __init__.py",
                 "__all__ = ['spam', 'ham', 'eggs']"),
                ("t6 spam.py", ""),
                ("t6 ham.py", ""),
                ("t6 eggs.py", ""),
               ]
        self.mkhier(hier)

        import t6
        self.assertEqual(fixdir(dir(t6)),
                         ['__all__', '__cached__', '__doc__', '__file__',
                          '__name__', '__package__', '__path__'])
        s = """
            import t6
            from t6 import *
            self.assertEqual(fixdir(dir(t6)),
                             ['__all__', '__cached__', '__doc__', '__file__',
                              '__name__', '__package__', '__path__',
                              'eggs', 'ham', 'spam'])
            self.assertEqual(dir(), ['eggs', 'ham', 'self', 'spam', 't6'])
            """
        self.run_code(s)

    def test_7(self):
        """'import ... as' and 'from ... import ... as' do not bind the
        original dotted names."""
        # NOTE(review): "t7 sub .py" creates a file literally named ".py"
        # inside t7/sub -- possibly meant to be "sub subsub.py" (cf. test_4).
        # Harmless either way, since the file must never be loaded; confirm
        # against upstream before changing.
        hier = [
                ("t7.py", ""),
                ("t7", None),
                ("t7 __init__.py", ""),
                ("t7 sub.py",
                 "raise RuntimeError('Shouldnt load sub.py')"),
                ("t7 sub", None),
                ("t7 sub __init__.py", ""),
                ("t7 sub .py",
                 "raise RuntimeError('Shouldnt load subsub.py')"),
                ("t7 sub subsub", None),
                ("t7 sub subsub __init__.py",
                 "spam = 1"),
               ]
        self.mkhier(hier)

        t7, sub, subsub = None, None, None

        import t7 as tas
        self.assertEqual(fixdir(dir(tas)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__'])
        self.assertFalse(t7)
        from t7 import sub as subpar
        self.assertEqual(fixdir(dir(subpar)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__'])
        self.assertFalse(t7)
        self.assertFalse(sub)
        from t7.sub import subsub as subsubsub
        self.assertEqual(fixdir(dir(subsubsub)),
                         ['__cached__', '__doc__', '__file__', '__name__',
                          '__package__', '__path__', 'spam'])
        self.assertFalse(t7)
        self.assertFalse(sub)
        self.assertFalse(subsub)
        from t7.sub.subsub import spam as ham
        self.assertEqual(ham, 1)
        self.assertFalse(t7)
        self.assertFalse(sub)
        self.assertFalse(subsub)

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_8(self):
        """A bare string expression in __init__.py becomes the package
        docstring."""
        hier = [
                ("t8", None),
                ("t8 __init__"+os.extsep+"py", "'doc for t8'"),
               ]
        self.mkhier(hier)

        import t8
        self.assertEqual(t8.__doc__, "doc for t8")
def test_main():
    """Entry point for the regrtest harness: run all tests in this module."""
    support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.