repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
tundra/neutrino | tests/python/plankton/test_options.py | 2 | 3778 | #!/usr/bin/python
# Copyright 2013 the Neutrino authors (see AUTHORS).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from plankton import options
import unittest
a = options.ArgumentElement
f = options.FlagElement
N = None
def o(*elms):
return options.Options(elements=elms)
class OptionsTest(unittest.TestCase):
def run_element_test(self, expected, *args):
result = options.parse(args)
self.assertEqual(expected, result.elements)
def test_expressions(self):
test = self.run_element_test
# Numbers
test([], '')
test([a(100)], '100')
test([a(0)], '0')
test([a(0), a(1), a(2)], '0', '1', '2')
test([a(0), a(1), a(2)], '0 1 2')
# Singletons
test([a(None)], 'null')
test([a(True)], 'true')
test([a(False)], 'false')
# Parens
test([a(0), a(1), a(2)], '0', '(1)', '((2))')
test([a(0), a(1), a(2)], '((0)) (1) 2')
# Quoted strings
test([a('')], '""')
test([a('foo')], '"foo"')
test([a('foo'), a('bar'), a('baz')], '"foo""bar""baz"')
test([a('foo'), a('bar'), a('baz')], '"foo"', '"bar"', '"baz"')
test([a('foo bar baz')], '"foo bar baz"')
# Symbols
test([a('foo')], 'foo')
test([a('f/o/o.bar')], 'f/o/o.bar')
test([a('foo'), a('bar'), a('baz')], 'foo bar baz')
test([a('foo'), a('bar'), a('baz')], 'foo', 'bar', 'baz')
test([a('foo-bar-baz')], 'foo-bar-baz')
test([a('foo_bar_baz')], 'foo_bar_baz')
test([a('foo_123_baz')], 'foo_123_baz')
# Arrays
test([a([])], '[]')
test([a([1])], '[1]')
test([a([1, 2])], '[1 2]')
test([a([1, 2, 3])], '[1 2 3]')
test([a([1, [2, 3], 4])], '[1 [2 3] 4]')
test([a([])], '[', ']')
test([a([1])], '[', '1', ']')
test([a([1, 2])], '[', '1', '2', ']')
test([a([1, 2, 3])], '[', '1 2 3', ']')
test([a([1, [2, 3], 4])], '[', '1', '[', '2', '3', ']', '4', ']')
# Maps
test([a({})], '{}')
test([a({"a": 4})], '{--a 4}')
test([a({"a": N})], '{--a}')
test([a({"a": N, "b": N})], '{--a --b}')
test([a({"a": "--b"})], '{--a "--b"}')
test([a({"a": 4})], '{--(a) 4}')
test([a({"a b c": 4})], '{--"a b c" 4}')
test([a({"--abc": 4})], '{--"--abc" 4}')
test([a({"a b c": N})], '{--"a b c"}')
test([a({"a-b-c": 4})], '{--a-b-c 4}')
test([a({"a": 4, "b": 5, "c": 6})], '{--a 4 --b 5 --c 6}')
test([a({"a": 4, "b": 5, "c": 6})], '{', '--a 4', '--b 5', '--c 6', '}')
test([a({"a": 4, "b": 5, "c": 6})], '{', '--a', '4', '--b', '5', '--c', '6', '}')
test([a({"a": 4, "b": 5, "c": 6})], '{', '--', 'a', '4', '--', 'b', '5', '--' 'c', '6', '}')
test([a({"a": [4, 5, 6]})], '{--a [4 5 6]}')
test([a({"a": [4, 5, 6]})], '{--a[ 4 5 6 ]}')
test([a({"a": [4, 5, 6]})], '{', '--a[', '4', '5', '6', ']', '}')
def test_nested(self):
test = self.run_element_test
test([a(o())], '{{}}')
test([a(o(f('foo', 'bar')))], '{{--foo bar}}')
test([a(o(f('foo', o(a(1), a(2), a(3)))))], '{{--foo {{1 2 3}}}}')
test([a(o(f('foo', N), f('bar', N), f('baz', N)))], '{{ --foo --bar --baz }}')
def test_options(self):
test = self.run_element_test
test(
[f('foo', 'bar')],
'--foo bar')
test(
[f('foo', 'bar'), f('baz', 'quux')],
'--foo bar --baz quux')
test(
[f('foo', 'bar'), f('baz', 'quux')],
'--foo', 'bar', '--baz', 'quux')
test(
[f('foo', N), f('bar', N), f('baz', 'quux')],
'--foo', '--bar', '--baz', 'quux')
test(
[f('foo', N), f('bar', N), f('baz', 'quux')],
'--foo --bar --baz quux')
test(
[a('a'), a('b'), f('foo', 'c'), a('d'), f('bar', 'e'), a('f')],
'a b --foo c d --bar e f')
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=0)
unittest.main(testRunner=runner)
| apache-2.0 |
kionz/librime | thirdparty/src/opencc/deps/gtest-1.7.0/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
* Exceptions thrown affect the remainder of the test workflow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
memsharded/conan | conans/util/windows.py | 1 | 4269 | import os
import subprocess
import tempfile
from conans.util.env_reader import get_env
from conans.util.files import load, mkdir, rmdir, save
from conans.util.log import logger
from conans.util.sha import sha256
CONAN_LINK = ".conan_link"
CONAN_REAL_PATH = "real_path.txt"
def conan_expand_user(path):
""" wrapper to the original expanduser function, to workaround python returning
verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar
"""
if path[:1] != '~':
return path
# In win these variables should exist and point to user directory, which
# must exist. Using context to avoid permanent modification of os.environ
old_env = dict(os.environ)
try:
home = os.environ.get("HOME")
# Problematic cases of wrong HOME variable
# - HOME = %USERPROFILE% verbatim, as messed by some other tools
# - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx
# In these cases, it is safe to remove it and rely on USERPROFILE directly
if home and (not os.path.exists(home) or
(os.getenv("MSYSTEM") and os.getenv("USERPROFILE"))):
del os.environ["HOME"]
result = os.path.expanduser(path)
finally:
os.environ.clear()
os.environ.update(old_env)
return result
def path_shortener(path, short_paths):
""" short_paths is 4-state:
False: Never shorten the path
True: Always shorten the path, create link if not existing
None: Use shorten path only if already exists, not create
"""
use_always_short_paths = get_env("CONAN_USE_ALWAYS_SHORT_PATHS", False)
short_paths = use_always_short_paths or short_paths
if short_paths is False or os.getenv("CONAN_USER_HOME_SHORT") == "None":
return path
link = os.path.join(path, CONAN_LINK)
if os.path.exists(link):
return load(link)
elif short_paths is None:
return path
if os.path.exists(path):
rmdir(path)
short_home = os.getenv("CONAN_USER_HOME_SHORT")
if not short_home:
drive = os.path.splitdrive(path)[0]
short_home = os.path.join(drive, os.sep, ".conan")
mkdir(short_home)
# Workaround for short_home living in NTFS file systems. Give full control permission
# to current user to avoid
# access problems in cygwin/msys2 windows subsystems when using short_home folder
try:
userdomain, username = os.getenv("USERDOMAIN"), os.environ["USERNAME"]
domainname = "%s\%s" % (userdomain, username) if userdomain else username
cmd = r'cacls %s /E /G "%s":F' % (short_home, domainname)
subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, quiet
except subprocess.CalledProcessError:
# cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
pass
redirect = hashed_redirect(short_home, path)
if not redirect:
logger.warning("Failed to create a deterministic short path in %s", short_home)
redirect = tempfile.mkdtemp(dir=short_home, prefix="")
# Save the full path of the local cache directory where the redirect is from.
# This file is for debugging purposes and not used by Conan.
save(os.path.join(redirect, CONAN_REAL_PATH), path)
# This "1" is the way to have a non-existing directory, so commands like
# shutil.copytree() to it, works. It can be removed without compromising the
# temp folder generator and conan-links consistency
redirect = os.path.join(redirect, "1")
save(link, redirect)
return redirect
def rm_conandir(path):
"""removal of a directory that might contain a link to a short path"""
link = os.path.join(path, CONAN_LINK)
if os.path.exists(link):
short_path = load(link)
rmdir(os.path.dirname(short_path))
rmdir(path)
def hashed_redirect(base, path, min_length=6, attempts=10):
max_length = min_length + attempts
full_hash = sha256(path.encode())
assert len(full_hash) > max_length
for length in range(min_length, max_length):
redirect = os.path.join(base, full_hash[:length])
if not os.path.exists(redirect):
return redirect
else:
return None
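# Illustrative result (hash prefix is hypothetical): for a given base and cache
# path, the first prefix of sha256(path) with no existing directory is used,
# growing from 6 up to 15 characters before giving up:
#
#     hashed_redirect('C:\\.conan', 'C:\\long\\cache\\path')  # -> 'C:\\.conan\\3f2a1b'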
| mit |
lmorchard/django | tests/urlpatterns_reverse/urls.py | 199 | 5421 | import warnings
from django.conf.urls import include, patterns, url
from django.utils.deprecation import RemovedInDjango110Warning
from .views import (
absolute_kwargs_view, defaults_view, empty_view, empty_view_partial,
empty_view_wrapped, nested_view,
)
other_patterns = [
url(r'non_path_include/$', empty_view, name='non_path_include'),
url(r'nested_path/$', nested_view),
]
# Test the deprecated patterns() function. Convert to a list of url() calls in Django 1.10.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RemovedInDjango110Warning)
urlpatterns = patterns('',
url(r'^places/([0-9]+)/$', empty_view, name='places'),
url(r'^places?/$', empty_view, name="places?"),
url(r'^places+/$', empty_view, name="places+"),
url(r'^places*/$', empty_view, name="places*"),
url(r'^(?:places/)?$', empty_view, name="places2?"),
url(r'^(?:places/)+$', empty_view, name="places2+"),
url(r'^(?:places/)*$', empty_view, name="places2*"),
url(r'^places/([0-9]+|[a-z_]+)/', empty_view, name="places3"),
url(r'^places/(?P<id>[0-9]+)/$', empty_view, name="places4"),
url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
url(r'^people/(?:name/)', empty_view, name="people2"),
url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
url(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name="people_backref"),
url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
url(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?', absolute_kwargs_view, name="named_optional"),
url(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?$', absolute_kwargs_view, name="named_optional_terminated"),
url(r'^nested/noncapture/(?:(?P<p>\w+))$', empty_view, name='nested-noncapture'),
url(r'^nested/capture/((\w+)/)?$', empty_view, name='nested-capture'),
url(r'^nested/capture/mixed/((?P<p>\w+))$', empty_view, name='nested-mixedcapture'),
url(r'^nested/capture/named/(?P<outer>(?P<inner>\w+)/)?$', empty_view, name='nested-namedcapture'),
url(r'^hardcoded/$', empty_view, name="hardcoded"),
url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
url(r'^people/(?P<state>\w\w)/(?P<name>[0-9])/$', empty_view, name="people4"),
url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
url(r'^character_set/[\w]/$', empty_view, name="range2"),
url(r'^price/\$([0-9]+)/$', empty_view, name="price"),
url(r'^price/[$]([0-9]+)/$', empty_view, name="price2"),
url(r'^price/[\$]([0-9]+)/$', empty_view, name="price3"),
url(r'^product/(?P<product>\w+)\+\(\$(?P<price>[0-9]+(\.[0-9]+)?)\)/$', empty_view, name="product"),
url(r'^headlines/(?P<year>[0-9]+)\.(?P<month>[0-9]+)\.(?P<day>[0-9]+)/$', empty_view, name="headlines"),
url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view, name="windows"),
url(r'^special_chars/(?P<chars>.+)/$', empty_view, name="special"),
url(r'^(?P<name>.+)/[0-9]+/$', empty_view, name="mixed"),
url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
url(r'^test/1/?', empty_view, name="test"),
url(r'^(?i)test/2/?$', empty_view, name="test2"),
url(r'^outer/(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls')),
url(r'^outer-no-kwargs/([0-9]+)/', include('urlpatterns_reverse.included_no_kwargs_urls')),
url('', include('urlpatterns_reverse.extra_urls')),
url(r'^lookahead-/(?!not-a-city)(?P<city>[^/]+)/$', empty_view, name='lookahead-negative'),
url(r'^lookahead\+/(?=a-city)(?P<city>[^/]+)/$', empty_view, name='lookahead-positive'),
url(r'^lookbehind-/(?P<city>[^/]+)(?<!not-a-city)/$', empty_view, name='lookbehind-negative'),
url(r'^lookbehind\+/(?P<city>[^/]+)(?<=a-city)/$', empty_view, name='lookbehind-positive'),
# Partials should be fine.
url(r'^partial/', empty_view_partial, name="partial"),
url(r'^partial_wrapped/', empty_view_wrapped, name="partial_wrapped"),
# This is non-reversible, but we shouldn't blow up when parsing it.
url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),
# Regression views for #9038. See tests for more details
url(r'arg_view/$', 'urlpatterns_reverse.views.kwargs_view'),
url(r'arg_view/(?P<arg1>[0-9]+)/$', 'urlpatterns_reverse.views.kwargs_view'),
url(r'absolute_arg_view/(?P<arg1>[0-9]+)/$', absolute_kwargs_view),
url(r'absolute_arg_view/$', absolute_kwargs_view),
# Tests for #13154. Mixed syntax to test both ways of defining URLs.
url(r'defaults_view1/(?P<arg1>[0-9]+)/', defaults_view, {'arg2': 1}, name='defaults'),
(r'defaults_view2/(?P<arg1>[0-9]+)/', defaults_view, {'arg2': 2}, 'defaults'),
url('^includes/', include(other_patterns)),
# Security tests
url('(.+)/security/$', empty_view, name='security'),
)
| bsd-3-clause |
spring-week-topos/nova-week | nova/openstack/common/report/models/version.py | 27 | 1509 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides Openstack Version Info Model
This module defines a class representing the data
model for Openstack package and version information
"""
import nova.openstack.common.report.models.with_default_views as mwdv
import nova.openstack.common.report.views.text.generic as generic_text_views
class PackageModel(mwdv.ModelWithDefaultViews):
"""A Package Information Model
This model holds information about the current
package. It contains vendor, product, and version
information.
:param str vendor: the product vendor
:param str product: the product name
:param str version: the product version
"""
def __init__(self, vendor, product, version):
super(PackageModel, self).__init__(
text_view=generic_text_views.KeyValueView()
)
self['vendor'] = vendor
self['product'] = product
self['version'] = version
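# Illustrative construction (values are hypothetical):
#
#     model = PackageModel('OpenStack Foundation', 'nova', '2013.2')
#     model['version']  # -> '2013.2'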
| apache-2.0 |
dmitry-sobolev/ansible | test/runner/lib/import_analysis.py | 12 | 8634 | """Analyze python import statements."""
from __future__ import absolute_import, print_function
import ast
import os
import uuid
from lib.util import (
display,
ApplicationError,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
"""Return a dictionary of module_utils names mapped to sets of python file paths.
:type compile_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None):
"""Recursively expand module_utils imports from module_utils files.
:type import_name: str
:type depth: int
:type seen: set[str] | None
:rtype set[str]
"""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = set([import_name])
results = set([import_name])
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = os.path.join('lib/', '%s.py' % import_name.replace('.', '/'))
if import_path not in imports_by_target_path:
import_path = os.path.join('lib/', import_name.replace('.', '/'), '__init__.py')
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path in imports_by_target_path:
if module_util in imports_by_target_path[target_path]:
for module_util_import in sorted(module_util_imports):
if module_util_import not in imports_by_target_path[target_path]:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
imports_by_target_path[target_path].add(module_util_import)
imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
module_utils = []
base_path = 'lib/ansible/module_utils'
for root, _, file_names in os.walk(base_path):
for file_name in file_names:
path = os.path.join(root, file_name)
name, ext = os.path.splitext(file_name)
if path == 'lib/ansible/module_utils/__init__.py':
continue
if ext != '.py':
continue
if name == '__init__':
module_util = root
else:
module_util = os.path.join(root, name)
module_utils.append(module_util[4:].replace('/', '.'))
return set(module_utils)
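# Illustrative mapping (path is hypothetical):
#
#     'lib/ansible/module_utils/facts/network.py'
#     -> 'ansible.module_utils.facts.network'   # 'lib/' stripped, '/' -> '.'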
def extract_python_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
with open(path, 'r') as module_fd:
code = module_fd.read()
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
"""
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node):
"""
:type node: ast.Import
"""
self.generic_visit(node)
for alias in node.names:
if alias.name.startswith('ansible.module_utils.'):
# import ansible.module_utils.MODULE[.MODULE]
self.add_import(alias.name, node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node):
"""
:type node: ast.ImportFrom
"""
self.generic_visit(node)
if not node.module:
return
if node.module == 'ansible.module_utils' or node.module.startswith('ansible.module_utils.'):
for alias in node.names:
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_import('%s.%s' % (node.module, alias.name), node.lineno)
def add_import(self, name, line_number):
"""
:type name: str
:type line_number: int
"""
import_name = name
while len(name) > len('ansible.module_utils.'):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if self.path.startswith('test/'):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
| gpl-3.0 |
marhag87/python-gerrit | gerrit/changes/revision.py | 3 | 1911 | """
Revision
========
Manage gerrit revisions for a change id
"""
from gerrit.error import (
UnhandledError
)
class Revision(object):
"""Manage gerrit revisions"""
def __init__(self, gerrit_con, change_id, revision_id):
"""
:param gerrit_con: The connection object to gerrit
:type gerrit_con: gerrit.Connection
:param change_id: The Change Request ID
:type change_id: str
:param revision_id: The Change Request Patch Set/Revision ID
:type revision_id: str
"""
# HTTP REST API HEADERS
self._change_id = change_id
self._revision_id = revision_id
self._gerrit_con = gerrit_con
def set_review(self, labels=None, message='', comments=None):
"""
Endpoint to create a review for a change_id and a specific patch set
:param labels: This is used to set +2 Code-Review for example.
:type labels: dict
:param message: The message will appear in the actually change-request page.
:type message: str
:param comments: This will become comments in the code.
:type comments: dict
"""
if not labels:
labels = {}
if not comments:
comments = {}
r_endpoint = "/a/changes/%s/revisions/%s/review" % (self._change_id,
self._revision_id)
payload = {}
if labels:
payload['labels'] = labels
if message:
payload['message'] = message
if comments:
payload['comments'] = comments
req = self._gerrit_con.call(
request='post',
r_endpoint=r_endpoint,
r_payload=payload
)
status_code = req.status_code
if status_code == 200:
return True
else:
raise UnhandledError(req.content)
| apache-2.0 |
ajclarke/neo-fighting-dojo | scripts/premake/pack-assets.py | 1 | 1254 | import os
import shutil
import packer
UNPROCESSED_ASSET_FOLDER = "assets/unprocessed"
PROCESSED_ASSET_FOLDER = "assets/preprocessed"
TEXTURE_FOLDER = "assets/compiled"
print("=== Processing unprocessed assets in {} ===".format(
UNPROCESSED_ASSET_FOLDER))
for root, dirs, files in os.walk(UNPROCESSED_ASSET_FOLDER):
images = [f for f in files if f.endswith(".png")]
if images:
print(os.listdir(root))
relative_path = os.path.relpath(root, UNPROCESSED_ASSET_FOLDER)
outpath = os.path.join(TEXTURE_FOLDER, relative_path)
packer.pack(root, outpath)
print("=== Copying processed assets in {} ===".format(PROCESSED_ASSET_FOLDER))
for root, dirs, files in os.walk(PROCESSED_ASSET_FOLDER):
if files:
relative_path = os.path.relpath(root, PROCESSED_ASSET_FOLDER)
outpath = os.path.join(TEXTURE_FOLDER, relative_path)
for file_to_copy in files:
source = os.path.join(PROCESSED_ASSET_FOLDER,
relative_path, file_to_copy)
dest = os.path.join(outpath, file_to_copy)
print("copying {} to {}".format(source, dest))
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copyfile(source, dest)
| gpl-3.0 |
mayankcu/Django-social | venv/Lib/site-packages/django/contrib/localflavor/mx/mx_states.py | 88 | 1296 | # -*- coding: utf-8 -*-
"""
A list of Mexican states for use as `choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
from django.utils.translation import ugettext_lazy as _
# All 31 states, plus the `Distrito Federal`.
STATE_CHOICES = (
('AGU', _(u'Aguascalientes')),
('BCN', _(u'Baja California')),
('BCS', _(u'Baja California Sur')),
('CAM', _(u'Campeche')),
('CHH', _(u'Chihuahua')),
('CHP', _(u'Chiapas')),
('COA', _(u'Coahuila')),
('COL', _(u'Colima')),
('DIF', _(u'Distrito Federal')),
('DUR', _(u'Durango')),
('GRO', _(u'Guerrero')),
('GUA', _(u'Guanajuato')),
('HID', _(u'Hidalgo')),
('JAL', _(u'Jalisco')),
('MEX', _(u'Estado de México')),
('MIC', _(u'Michoacán')),
('MOR', _(u'Morelos')),
('NAY', _(u'Nayarit')),
('NLE', _(u'Nuevo León')),
('OAX', _(u'Oaxaca')),
('PUE', _(u'Puebla')),
('QUE', _(u'Querétaro')),
('ROO', _(u'Quintana Roo')),
('SIN', _(u'Sinaloa')),
('SLP', _(u'San Luis Potosí')),
('SON', _(u'Sonora')),
('TAB', _(u'Tabasco')),
('TAM', _(u'Tamaulipas')),
('TLA', _(u'Tlaxcala')),
('VER', _(u'Veracruz')),
('YUC', _(u'Yucatán')),
('ZAC', _(u'Zacatecas')),
)
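# Illustrative usage (a sketch, assuming a standard Django form):
#
#     from django import forms
#
#     class MXAddressForm(forms.Form):
#         state = forms.ChoiceField(choices=STATE_CHOICES)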
| bsd-3-clause |
ewandor/home-assistant | homeassistant/util/yaml.py | 2 | 11795 | """YAML utility functions."""
import logging
import os
import sys
import fnmatch
from collections import OrderedDict
from typing import Union, List, Dict
import yaml
try:
import keyring
except ImportError:
keyring = None
try:
import credstash # pylint: disable=import-error
except ImportError:
credstash = None
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
_SECRET_NAMESPACE = 'homeassistant'
SECRET_YAML = 'secrets.yaml'
__SECRET_CACHE = {} # type: Dict
class NodeListClass(list):
"""Wrapper class to be able to add attributes on a list."""
pass
class NodeStrClass(str):
"""Wrapper class to be able to add attributes on a string."""
pass
def _add_reference(obj, loader, node):
"""Add file reference information to an object."""
if isinstance(obj, list):
obj = NodeListClass(obj)
if isinstance(obj, str):
obj = NodeStrClass(obj)
setattr(obj, '__config_file__', loader.name)
setattr(obj, '__line__', node.start_mark.line)
return obj
# pylint: disable=too-many-ancestors
class SafeLineLoader(yaml.SafeLoader):
"""Loader class that keeps track of line numbers."""
def compose_node(self, parent: yaml.nodes.Node, index) -> yaml.nodes.Node:
"""Annotate a node with the first line it was seen."""
last_line = self.line # type: int
node = super(SafeLineLoader,
self).compose_node(parent, index) # type: yaml.nodes.Node
node.__line__ = last_line + 1
return node
def load_yaml(fname: str) -> Union[List, Dict]:
"""Load a YAML file."""
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file, Loader=SafeLineLoader) or OrderedDict()
except yaml.YAMLError as exc:
_LOGGER.error(exc)
raise HomeAssistantError(exc)
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc)
def dump(_dict: dict) -> str:
"""Dump YAML to a string and remove null."""
return yaml.safe_dump(
_dict, default_flow_style=False, allow_unicode=True) \
.replace(': null\n', ':\n')
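# Illustrative round-trip (a sketch): safe_dump renders None as 'null', which
# dump() strips so the key serializes as empty:
#
#     dump({'name': 'kitchen', 'brightness': None})
#     # -> 'brightness:\nname: kitchen\n' (keys sorted by safe_dump)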
def clear_secret_cache() -> None:
"""Clear the secret cache.
Async friendly.
"""
__SECRET_CACHE.clear()
def _include_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node) -> Union[List, Dict]:
"""Load another YAML file and embeds it using the !include tag.
Example:
device_tracker: !include device_tracker.yaml
"""
fname = os.path.join(os.path.dirname(loader.name), node.value)
return _add_reference(load_yaml(fname), loader, node)
def _is_file_valid(name: str) -> bool:
"""Decide if a file is valid."""
return not name.startswith('.')
def _find_files(directory: str, pattern: str):
"""Recursively load files in a directory."""
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if _is_file_valid(d)]
for basename in files:
if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def _include_dir_named_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node) -> OrderedDict:
"""Load multiple files from directory as a dictionary."""
mapping = OrderedDict() # type: OrderedDict
loc = os.path.join(os.path.dirname(loader.name), node.value)
for fname in _find_files(loc, '*.yaml'):
filename = os.path.splitext(os.path.basename(fname))[0]
mapping[filename] = load_yaml(fname)
return _add_reference(mapping, loader, node)
def _include_dir_merge_named_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node) -> OrderedDict:
"""Load multiple files from directory as a merged dictionary."""
mapping = OrderedDict() # type: OrderedDict
loc = os.path.join(os.path.dirname(loader.name), node.value)
for fname in _find_files(loc, '*.yaml'):
if os.path.basename(fname) == SECRET_YAML:
continue
loaded_yaml = load_yaml(fname)
if isinstance(loaded_yaml, dict):
mapping.update(loaded_yaml)
return _add_reference(mapping, loader, node)
def _include_dir_list_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load multiple files from directory as a list."""
loc = os.path.join(os.path.dirname(loader.name), node.value)
return [load_yaml(f) for f in _find_files(loc, '*.yaml')
if os.path.basename(f) != SECRET_YAML]
def _include_dir_merge_list_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load multiple files from directory as a merged list."""
loc = os.path.join(os.path.dirname(loader.name),
node.value) # type: str
merged_list = [] # type: List
for fname in _find_files(loc, '*.yaml'):
if os.path.basename(fname) == SECRET_YAML:
continue
loaded_yaml = load_yaml(fname)
if isinstance(loaded_yaml, list):
merged_list.extend(loaded_yaml)
return _add_reference(merged_list, loader, node)
def _ordered_dict(loader: SafeLineLoader,
node: yaml.nodes.MappingNode) -> OrderedDict:
"""Load YAML mappings into an ordered dictionary to preserve key order."""
loader.flatten_mapping(node)
nodes = loader.construct_pairs(node)
seen = {} # type: Dict
for (key, _), (child_node, _) in zip(nodes, node.value):
line = child_node.start_mark.line
try:
hash(key)
except TypeError:
fname = getattr(loader.stream, 'name', '')
raise yaml.MarkedYAMLError(
context="invalid key: \"{}\"".format(key),
context_mark=yaml.Mark(fname, 0, line, -1, None, None)
)
if key in seen:
fname = getattr(loader.stream, 'name', '')
_LOGGER.error(
'YAML file %s contains duplicate key "%s". '
'Check lines %d and %d.', fname, key, seen[key], line)
seen[key] = line
return _add_reference(OrderedDict(nodes), loader, node)
def _construct_seq(loader: SafeLineLoader, node: yaml.nodes.Node):
"""Add line number and file name to Load YAML sequence."""
obj, = loader.construct_yaml_seq(node)
return _add_reference(obj, loader, node)
def _env_var_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load environment variables and embed it into the configuration YAML."""
args = node.value.split()
# Check for a default value
if len(args) > 1:
return os.getenv(args[0], ' '.join(args[1:]))
elif args[0] in os.environ:
return os.environ[args[0]]
else:
_LOGGER.error("Environment variable %s not defined.", node.value)
raise HomeAssistantError(node.value)
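# Illustrative YAML using the tag above (variable name and default value are
# hypothetical):
#
#     api_key: !env_var MY_API_KEY fallback_value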
def _load_secret_yaml(secret_path: str) -> Dict:
"""Load the secrets yaml from path."""
secret_path = os.path.join(secret_path, SECRET_YAML)
if secret_path in __SECRET_CACHE:
return __SECRET_CACHE[secret_path]
_LOGGER.debug('Loading %s', secret_path)
try:
secrets = load_yaml(secret_path)
if 'logger' in secrets:
logger = str(secrets['logger']).lower()
if logger == 'debug':
_LOGGER.setLevel(logging.DEBUG)
else:
_LOGGER.error("secrets.yaml: 'logger: debug' expected,"
" but 'logger: %s' found", logger)
del secrets['logger']
except FileNotFoundError:
secrets = {}
__SECRET_CACHE[secret_path] = secrets
return secrets
# pylint: disable=protected-access
def _secret_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load secrets and embed it into the configuration YAML."""
secret_path = os.path.dirname(loader.name)
while True:
secrets = _load_secret_yaml(secret_path)
if node.value in secrets:
_LOGGER.debug("Secret %s retrieved from secrets.yaml in "
"folder %s", node.value, secret_path)
return secrets[node.value]
if secret_path == os.path.dirname(sys.path[0]):
break # sys.path[0] set to config/deps folder by bootstrap
secret_path = os.path.dirname(secret_path)
if not os.path.exists(secret_path) or len(secret_path) < 5:
break # Somehow we got past the .homeassistant config folder
if keyring:
# do some keyring stuff
pwd = keyring.get_password(_SECRET_NAMESPACE, node.value)
if pwd:
_LOGGER.debug("Secret %s retrieved from keyring", node.value)
return pwd
global credstash # pylint: disable=invalid-name
if credstash:
try:
pwd = credstash.getSecret(node.value, table=_SECRET_NAMESPACE)
if pwd:
_LOGGER.debug("Secret %s retrieved from credstash", node.value)
return pwd
except credstash.ItemNotFound:
pass
except Exception: # pylint: disable=broad-except
# Catch if package installed and no config
credstash = None
_LOGGER.error("Secret %s not defined", node.value)
raise HomeAssistantError(node.value)
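# Illustrative pairing (hypothetical values): secrets.yaml holds
# 'http_password: mysecret' and a configuration file references it with
#
#     api_password: !secret http_password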
yaml.SafeLoader.add_constructor('!include', _include_yaml)
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
_ordered_dict)
yaml.SafeLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG, _construct_seq)
yaml.SafeLoader.add_constructor('!env_var', _env_var_yaml)
yaml.SafeLoader.add_constructor('!secret', _secret_yaml)
yaml.SafeLoader.add_constructor('!include_dir_list', _include_dir_list_yaml)
yaml.SafeLoader.add_constructor('!include_dir_merge_list',
_include_dir_merge_list_yaml)
yaml.SafeLoader.add_constructor('!include_dir_named', _include_dir_named_yaml)
yaml.SafeLoader.add_constructor('!include_dir_merge_named',
_include_dir_merge_named_yaml)
# From: https://gist.github.com/miracle2k/3184458
# pylint: disable=redefined-outer-name
def represent_odict(dump, tag, mapping, flow_style=None):
"""Like BaseRepresenter.represent_mapping but does not issue the sort()."""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
for item_key, item_value in mapping:
node_key = dump.represent_data(item_key)
node_value = dump.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and
not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
yaml.SafeDumper.add_representer(
OrderedDict,
lambda dumper, value:
represent_odict(dumper, 'tag:yaml.org,2002:map', value))
yaml.SafeDumper.add_representer(
NodeListClass,
lambda dumper, value:
dumper.represent_sequence('tag:yaml.org,2002:seq', value))
| apache-2.0 |
dgjustice/ansible | test/units/modules/network/ios/test_ios_system.py | 59 | 5262 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_system
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosSystemModule(TestIosModule):
module = ios_system
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.ios.ios_system.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.ios.ios_system.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('ios_system_config.cfg')
self.load_config.return_value = None
def test_ios_system_hostname_changed(self):
set_module_args(dict(hostname='foo'))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands)
def test_ios_system_domain_name(self):
set_module_args(dict(domain_name=['test.com']))
commands = ['ip domain name test.com',
'no ip domain name eng.example.net',
'no ip domain name vrf management eng.example.net']
self.execute_module(changed=True, commands=commands)
def test_ios_system_domain_name_complex(self):
set_module_args(dict(domain_name=[{'name': 'test.com', 'vrf': 'test'},
{'name': 'eng.example.net'}]))
commands = ['ip domain name vrf test test.com',
'no ip domain name vrf management eng.example.net']
self.execute_module(changed=True, commands=commands)
def test_ios_system_domain_search(self):
set_module_args(dict(domain_search=['ansible.com', 'redhat.com']))
commands = ['no ip domain list vrf management example.net',
'no ip domain list example.net',
'no ip domain list example.com',
'ip domain list ansible.com',
'ip domain list redhat.com']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_system_domain_search_complex(self):
set_module_args(dict(domain_search=[{'name': 'ansible.com', 'vrf': 'test'}]))
commands = ['no ip domain list vrf management example.net',
'no ip domain list example.net',
'no ip domain list example.com',
'ip domain list vrf test ansible.com']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_system_lookup_source(self):
set_module_args(dict(lookup_source='Ethernet1'))
commands = ['ip domain lookup source-interface Ethernet1']
self.execute_module(changed=True, commands=commands)
def test_ios_system_name_servers(self):
name_servers = ['8.8.8.8', '8.8.4.4']
set_module_args(dict(name_servers=name_servers))
commands = ['no ip name-server vrf management 8.8.8.8',
'ip name-server 8.8.4.4']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_system_name_servers_complex(self):
name_servers = dict(server='8.8.8.8', vrf='test')
set_module_args(dict(name_servers=name_servers))
commands = ['no name-server 8.8.8.8',
'no name-server vrf management 8.8.8.8',
'ip name-server vrf test 8.8.8.8']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_system_state_absent(self):
set_module_args(dict(state='absent'))
commands = ['no hostname',
'no ip domain lookup source-interface GigabitEthernet0/0',
'no ip domain list vrf management', 'no ip domain list',
'no ip domain name vrf management', 'no ip domain name',
'no ip name-server vrf management', 'no ip name-server']
self.execute_module(changed=True, commands=commands)
def test_ios_system_no_change(self):
set_module_args(dict(hostname='ios01'))
self.execute_module(commands=[])
def test_ios_system_missing_vrf(self):
name_servers = dict(server='8.8.8.8', vrf='missing')
set_module_args(dict(name_servers=name_servers))
self.execute_module(failed=True)
| gpl-3.0 |
chubbymaggie/ROPgadget | dependencies/capstone-next/bindings/python/test_skipdata.py | 3 | 2417 | #!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynh <aquynh@gmail.com>
from __future__ import print_function
from capstone import *
import binascii
from test import to_x, to_hex, to_x_32
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x00\x91\x92"
RANDOM_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
all_tests = (
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, RANDOM_CODE, "Arm", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print()
# Sample callback for SKIPDATA option
def testcb(buffer, offset, userdata):
# always skip 2 bytes of data
return 2
# ## Test class Cs
def test_class():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 16)
print("Platform: %s" %comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
md.skipdata = True
# Default "data" instruction's name is ".byte". To rename it to "db", just uncomment
# the code below.
# md.skipdata_setup = ("db", None, None)
# NOTE: This example ignores SKIPDATA's callback (first None) & user_data (second None)
# To customize the SKIPDATA callback, uncomment the line below.
# md.skipdata_setup = (".db", CS_SKIPDATA_CALLBACK(testcb), None)
for insn in md.disasm(code, 0x1000):
#bytes = binascii.hexlify(insn.bytes)
#print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print()
except CsError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_class()
| gpl-2.0 |
domyam97/airlights | slack/venv/lib/python2.7/site-packages/urllib3/_collections.py | 95 | 10204 | from __future__ import absolute_import
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called.
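Illustrative usage (a sketch)::
>>> container = RecentlyUsedContainer(maxsize=2)
>>> container['a'] = 1
>>> container['b'] = 2
>>> container['c'] = 3  # evicts 'a', the least recently used key
>>> 'a' in container
False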
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = OrderedDict()
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = [key, val]
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ', '.join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
dict((k.lower(), v) for k, v in other.itermerged()))
def __ne__(self, other):
return not self.__eq__(other)
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
# Using the MutableMapping function directly fails due to the private marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = [key, val]
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
vals.append(val)
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({0} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key, default=__marker):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
if default is self.__marker:
return []
return default
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
# Backwards compatibility for http.cookiejar
get_all = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
                # Copy lists so later mutation can't leak; tuples need no copy
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
        # Python 2.7 does not expose a proper API for exporting multi-valued
        # headers efficiently. This function re-reads raw lines from the
        # message object and extracts the multi-valued headers properly.
headers = []
for line in message.headers:
if line.startswith((' ', '\t')):
key, value = headers[-1]
headers[-1] = (key, value + '\r\n' + line.rstrip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
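        # Hedged illustration of the folding above: a continuation line
        # (leading space or tab) is appended to the previous header with its
        # line break preserved, e.g.
        #   ['Subject: a\n', ' b\n']  ->  [('Subject', 'a\r\n b')]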
| mit |
durai145/youtube-dl | youtube_dl/extractor/pornhub.py | 51 | 5647 | from __future__ import unicode_literals
import os
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlparse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
str_to_int,
)
from ..aes import (
aes_decrypt_text
)
class PornHubIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': '882f488fa1f0026f023f33576004a2ed',
'info_dict': {
'id': '648719015',
'ext': 'mp4',
"uploader": "Babes",
"title": "Seductive Indian beauty strips down and fingers her pink pussy",
"age_limit": 18
}
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True,
}]
@classmethod
def _extract_url(cls, webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/\d+)\1', webpage)
if mobj:
return mobj.group('url')
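    # Illustrative (assumed) markup matched by the regex above:
    #   <iframe width="600" src="//www.pornhub.com/embed/12345"></iframe>
    # would return '//www.pornhub.com/embed/12345'.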
def _extract_count(self, pattern, webpage, name):
return str_to_int(self._search_regex(
pattern, webpage, '%s count' % name, fatal=False))
def _real_extract(self, url):
video_id = self._match_id(url)
req = compat_urllib_request.Request(
'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
error_msg = self._html_search_regex(
r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>',
webpage, 'error message', default=None)
if error_msg:
error_msg = re.sub(r'\s+', ' ', error_msg)
raise ExtractorError(
'PornHub said: %s' % error_msg,
expected=True, video_id=video_id)
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
video_uploader = self._html_search_regex(
r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
webpage, 'uploader', fatal=False)
thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
if thumbnail:
thumbnail = compat_urllib_parse_unquote(thumbnail)
view_count = self._extract_count(
r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
like_count = self._extract_count(
r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
dislike_count = self._extract_count(
r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage)))
if webpage.find('"encrypted":true') != -1:
password = compat_urllib_parse_unquote_plus(
self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
formats = []
for video_url in video_urls:
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
format = "-".join(format)
m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
if m is None:
height = None
tbr = None
else:
height = int(m.group('height'))
tbr = int(m.group('tbr'))
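            # For illustration (path shape assumed): a URL path segment like
            # '480P_600K' yields format '480P-600K', height 480, tbr 600.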
formats.append({
'url': video_url,
'ext': extension,
'format': format,
'format_id': format,
'tbr': tbr,
'height': height,
})
self._sort_formats(formats)
return {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'thumbnail': thumbnail,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'formats': formats,
'age_limit': 18,
}
class PornHubPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.pornhub.com/playlist/6201671',
'info_dict': {
'id': '6201671',
'title': 'P0p4',
},
'playlist_mincount': 35,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
            for video_url in set(re.findall(r'href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
]
playlist = self._parse_json(
self._search_regex(
r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
playlist_id)
return self.playlist_result(
entries, playlist_id, playlist.get('title'), playlist.get('description'))
| unlicense |
i-am-offline/smarthome | plugins/artnet/__init__.py | 12 | 4335 | #!/usr/bin/env python3
# coding=utf-8
#
# Copyright 2013 KNX-User-Forum e.V. http://knx-user-forum.de/
# Author mode@gmx.co.uk
#
# This file is part of SmartHome.py. http://mknx.github.io/smarthome/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import socket
import struct
logger = logging.getLogger('Artnet')
class ArtNet():
packet_counter = 1
dmxdata = [0, 0]
def __init__(self, smarthome, artnet_net, artnet_subnet, artnet_universe, ip, port):
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.net = int(artnet_net)
self.subnet = int(artnet_subnet)
self.universe = int(artnet_universe)
self.ip = ip
self.port = int(port)
logger.debug("Init ArtNet Plugin done")
def run(self):
pass
def stop(self):
self.close()
def __call__(self, var1=None, var2=None):
if type(var1) == int and type(var2) == int:
self.send_single_value(var1, var2)
if type(var1) == int and type(var2) == list:
self.send_frame_starting_at(var1, var2)
if type(var1) == list and type(var2) == type(None):
self.send_frame(var1)
def send_single_value(self, adr, value):
if adr < 1 or adr > 512:
logger.error("DMX address %s invalid" % adr)
return
while len(self.dmxdata) < adr:
self.dmxdata.append(0)
self.dmxdata[adr - 1] = value
self.__ArtDMX_broadcast()
def send_frame_starting_at(self, adr, values):
if adr < 1 or adr > (512 - len(values) + 1):
logger.error("DMX address %s with length %s invalid" %
(adr, len(values)))
return
while len(self.dmxdata) < (adr + len(values) - 1):
self.dmxdata.append(0)
cnt = 0
for value in values:
self.dmxdata[adr - 1 + cnt] = value
cnt += 1
self.__ArtDMX_broadcast()
def send_frame(self, dmxframe):
if len(dmxframe) < 2:
logger.error("Send at least 2 channels")
return
self.dmxdata = dmxframe
self.__ArtDMX_broadcast()
def __ArtDMX_broadcast(self):
# logger.info("Incomming DMX: %s"%self.dmxdata)
# New Array
data = []
# Fix ID 7byte + 0x00
data.append("Art-Net\x00")
# OpCode = OpOutput / OpDmx -> 0x5000, Low Byte first
data.append(struct.pack('<H', 0x5000))
# ProtVerHi and ProtVerLo -> Protocol Version 14, High Byte first
data.append(struct.pack('>H', 14))
# Order 1 to 255
data.append(struct.pack('B', self.packet_counter))
self.packet_counter += 1
if self.packet_counter > 255:
self.packet_counter = 1
# Physical Input Port
data.append(struct.pack('B', 0))
# Artnet source address
data.append(
struct.pack('<H', self.net << 8 | self.subnet << 4 | self.universe))
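        # e.g. net=1, subnet=2, universe=3 packs to 0x0123, sent low byte
        # first ('<H') per the 15-bit Art-Net Port-Address layout.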
# Length of DMX Data, High Byte First
data.append(struct.pack('>H', len(self.dmxdata)))
# DMX Data
for d in self.dmxdata:
data.append(struct.pack('B', d))
        # join the str/bytes tokens into a single bytes payload
result = bytes()
for token in data:
            try:  # handles str tokens, which must be encoded to bytes
                result = result + token.encode('utf-8', 'ignore')
            except AttributeError:  # handles tokens that are already bytes
                result = result + token
# data = "".join(data)
# debug
# logger.info("Outgoing Artnet:%s"%(':'.join(x.encode('hex') for x in data)))
# send over ethernet
self.s.sendto(result, (self.ip, self.port))
def close(self):
self.s.close()
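# Hedged usage sketch; constructor arguments are inferred from __init__ above
# (the smarthome handle 'sh' is an assumption), 6454 is the standard Art-Net
# UDP port:
#
#   artnet = ArtNet(sh, artnet_net=0, artnet_subnet=0, artnet_universe=0,
#                   ip='2.255.255.255', port=6454)
#   artnet(1, 255)        # single channel: channel 1 to full
#   artnet(10, [0, 128])  # frame starting at channel 10
#   artnet([0] * 512)     # whole 512-channel frame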
| gpl-3.0 |
felixma/nova | nova/virt/hyperv/basevolumeutils.py | 59 | 5541 | #
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes
and storage repositories.
"""
import abc
import re
import sys
if sys.platform == 'win32':
import _winreg
import wmi
from oslo_log import log as logging
from nova import block_device
from nova.i18n import _LI
from nova.virt import driver
LOG = logging.getLogger(__name__)
class BaseVolumeUtils(object):
_FILE_DEVICE_DISK = 7
def __init__(self, host='.'):
if sys.platform == 'win32':
self._conn_wmi = wmi.WMI(moniker='//%s/root/wmi' % host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
self._drive_number_regex = re.compile(r'DeviceID=\"[^,]*\\(\d+)\"')
@abc.abstractmethod
def login_storage_target(self, target_lun, target_iqn, target_portal):
pass
@abc.abstractmethod
def logout_storage_target(self, target_iqn):
pass
@abc.abstractmethod
def execute_log_out(self, session_id):
pass
def get_iscsi_initiator(self):
"""Get iscsi initiator name for this machine."""
computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
hostname = computer_system.name
keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
"iSCSI\\Discovery")
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
_winreg.KEY_ALL_ACCESS)
temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
initiator_name = str(temp[0])
_winreg.CloseKey(key)
except Exception:
LOG.info(_LI("The ISCSI initiator name can't be found. "
"Choosing the default one"))
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
if computer_system.PartofDomain:
initiator_name += '.' + computer_system.Domain.lower()
return initiator_name
def volume_in_mapping(self, mount_device, block_device_info):
block_device_list = [block_device.strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(
block_device.strip_dev(swap['device_name']))
block_device_list += [block_device.strip_dev(
ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(block_device_info)]
LOG.debug("block_device_list %s", block_device_list)
return block_device.strip_dev(mount_device) in block_device_list
def _get_drive_number_from_disk_path(self, disk_path):
drive_number = self._drive_number_regex.findall(disk_path)
if drive_number:
return int(drive_number[0])
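    # Hedged example of the regex above: a WMI disk path containing a
    # fragment like DeviceID="Microsoft:353b...\\1" yields drive number 1.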
def get_session_id_from_mounted_disk(self, physical_drive_path):
drive_number = self._get_drive_number_from_disk_path(
physical_drive_path)
if not drive_number:
return None
initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
for initiator_session in initiator_sessions:
devices = initiator_session.Devices
for device in devices:
device_number = device.DeviceNumber
if device_number == drive_number:
return initiator_session.SessionId
def _get_devices_for_target(self, target_iqn):
initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
TargetName=target_iqn)
if not initiator_sessions:
return []
return initiator_sessions[0].Devices
def get_device_number_for_target(self, target_iqn, target_lun):
devices = self._get_devices_for_target(target_iqn)
for device in devices:
if device.ScsiLun == target_lun:
return device.DeviceNumber
def get_target_lun_count(self, target_iqn):
devices = self._get_devices_for_target(target_iqn)
disk_devices = [device for device in devices
if device.DeviceType == self._FILE_DEVICE_DISK]
return len(disk_devices)
def get_target_from_disk_path(self, disk_path):
initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
drive_number = self._get_drive_number_from_disk_path(disk_path)
if not drive_number:
return None
for initiator_session in initiator_sessions:
devices = initiator_session.Devices
for device in devices:
if device.DeviceNumber == drive_number:
return (device.TargetName, device.ScsiLun)
| apache-2.0 |
Acidburn0zzz/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptcommandline.py | 3 | 36909 | from __future__ import absolute_import, print_function
import argparse
import os
import sys
from collections import OrderedDict
from distutils.spawn import find_executable
from datetime import timedelta
from six import iterkeys, itervalues, iteritems
from . import config
from . import wpttest
from .formatters import chromium, wptreport, wptscreenshot
def abs_path(path):
return os.path.abspath(os.path.expanduser(path))
def url_or_path(path):
from six.moves.urllib.parse import urlparse
parsed = urlparse(path)
if len(parsed.scheme) > 2:
return path
else:
return abs_path(path)
def require_arg(kwargs, name, value_func=None):
if value_func is None:
value_func = lambda x: x is not None
if name not in kwargs or not value_func(kwargs[name]):
print("Missing required argument %s" % name, file=sys.stderr)
sys.exit(1)
def create_parser(product_choices=None):
from mozlog import commandline
from . import products
if product_choices is None:
config_data = config.load()
product_choices = products.products_enabled(config_data)
parser = argparse.ArgumentParser(description="""Runner for web-platform-tests tests.""",
usage="""%(prog)s [OPTION]... [TEST]...
TEST is either the full path to a test file to run, or the URL of a test excluding
scheme host and port.""")
parser.add_argument("--manifest-update", action="store_true", default=None,
help="Regenerate the test manifest.")
parser.add_argument("--no-manifest-update", action="store_false", dest="manifest_update",
help="Prevent regeneration of the test manifest.")
parser.add_argument("--manifest-download", action="store_true", default=None,
help="Attempt to download a preexisting manifest when updating.")
parser.add_argument("--no-manifest-download", action="store_false", dest="manifest_download",
help="Prevent download of the test manifest.")
parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
help="Multiplier relative to standard test timeout to use")
parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
help="Split run into groups by directories. With a parameter,"
"limit the depth of splits e.g. --run-by-dir=1 to split by top-level"
"directory")
parser.add_argument("--processes", action="store", type=int, default=None,
help="Number of simultaneous processes to use")
parser.add_argument("--no-capture-stdio", action="store_true", default=False,
help="Don't capture stdio and write to logging")
parser.add_argument("--no-fail-on-unexpected", action="store_false",
default=True,
dest="fail_on_unexpected",
help="Exit with status code 0 when test expectations are violated")
mode_group = parser.add_argument_group("Mode")
mode_group.add_argument("--list-test-groups", action="store_true",
default=False,
help="List the top level directories containing tests that will run.")
mode_group.add_argument("--list-disabled", action="store_true",
default=False,
help="List the tests that are disabled on the current platform")
mode_group.add_argument("--list-tests", action="store_true",
default=False,
help="List all tests that will run")
stability_group = mode_group.add_mutually_exclusive_group()
stability_group.add_argument("--verify", action="store_true",
default=False,
help="Run a stability check on the selected tests")
stability_group.add_argument("--stability", action="store_true",
default=False,
help=argparse.SUPPRESS)
mode_group.add_argument("--verify-log-full", action="store_true",
default=False,
help="Output per-iteration test results when running verify")
mode_group.add_argument("--verify-repeat-loop", action="store",
default=10,
help="Number of iterations for a run that reloads each test without restart.",
type=int)
mode_group.add_argument("--verify-repeat-restart", action="store",
default=5,
help="Number of iterations, for a run that restarts the runner between each iteration",
type=int)
chaos_mode_group = mode_group.add_mutually_exclusive_group()
chaos_mode_group.add_argument("--verify-no-chaos-mode", action="store_false",
default=True,
dest="verify_chaos_mode",
help="Disable chaos mode when running on Firefox")
chaos_mode_group.add_argument("--verify-chaos-mode", action="store_true",
default=True,
dest="verify_chaos_mode",
help="Enable chaos mode when running on Firefox")
mode_group.add_argument("--verify-max-time", action="store",
default=None,
help="The maximum number of minutes for the job to run",
type=lambda x: timedelta(minutes=float(x)))
output_results_group = mode_group.add_mutually_exclusive_group()
output_results_group.add_argument("--verify-no-output-results", action="store_false",
dest="verify_output_results",
default=True,
help="Prints individuals test results and messages")
output_results_group.add_argument("--verify-output-results", action="store_true",
dest="verify_output_results",
default=True,
help="Disable printing individuals test results and messages")
test_selection_group = parser.add_argument_group("Test Selection")
test_selection_group.add_argument("--test-types", action="store",
nargs="*", default=wpttest.enabled_tests,
choices=wpttest.enabled_tests,
help="Test types to run")
test_selection_group.add_argument("--include", action="append",
help="URL prefix to include")
test_selection_group.add_argument("--exclude", action="append",
help="URL prefix to exclude")
test_selection_group.add_argument("--include-manifest", type=abs_path,
help="Path to manifest listing tests to include")
test_selection_group.add_argument("--skip-timeout", action="store_true",
help="Skip tests that are expected to time out")
test_selection_group.add_argument("--skip-implementation-status",
action="append",
choices=["not-implementing", "backlog", "implementing"],
help="Skip tests that have the given implementation status")
# TODO: Remove this when QUIC is enabled by default.
test_selection_group.add_argument("--enable-quic", action="store_true", default=False,
help="Enable tests that require QUIC server (default: false)")
test_selection_group.add_argument("--tag", action="append", dest="tags",
help="Labels applied to tests to include in the run. "
"Labels starting dir: are equivalent to top-level directories.")
test_selection_group.add_argument("--default-exclude", action="store_true",
default=False,
help="Only run the tests explicitly given in arguments. "
"No tests will run if the list is empty, and the "
"program will exit with status code 0.")
debugging_group = parser.add_argument_group("Debugging")
debugging_group.add_argument('--debugger', const="__default__", nargs="?",
help="run under a debugger, e.g. gdb or valgrind")
debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
debugging_group.add_argument("--rerun", action="store", type=int, default=1,
help="Number of times to re run each test without restarts")
debugging_group.add_argument("--repeat", action="store", type=int, default=1,
help="Number of times to run the tests, restarting between each run")
debugging_group.add_argument("--repeat-until-unexpected", action="store_true", default=None,
help="Run tests in a loop until one returns an unexpected result")
debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
help="Halt the test runner after each test (this happens by default if only a single test is run)")
debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test", action="store_false",
help="Don't halt the test runner irrespective of the number of tests run")
debugging_group.add_argument('--pause-on-unexpected', action="store_true",
help="Halt the test runner when an unexpected result is encountered")
debugging_group.add_argument('--no-restart-on-unexpected', dest="restart_on_unexpected",
default=True, action="store_false",
help="Don't restart on an unexpected result")
debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
help="Path or url to symbols file used to analyse crash minidumps.")
debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
help="Path to stackwalker program used to analyse minidumps.")
debugging_group.add_argument("--pdb", action="store_true",
help="Drop into pdb on python exception")
config_group = parser.add_argument_group("Configuration")
config_group.add_argument("--binary", action="store",
type=abs_path, help="Desktop binary to run tests against")
config_group.add_argument('--binary-arg',
default=[], action="append", dest="binary_args",
help="Extra argument for the binary")
config_group.add_argument("--webdriver-binary", action="store", metavar="BINARY",
type=abs_path, help="WebDriver server binary to use")
config_group.add_argument('--webdriver-arg',
default=[], action="append", dest="webdriver_args",
help="Extra argument for the WebDriver binary")
config_group.add_argument("--package-name", action="store",
help="Android package name to run tests against")
config_group.add_argument("--device-serial", action="store",
help="Running Android instance to connect to, if not emulator-5554")
config_group.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
help="Path to root directory containing test metadata"),
config_group.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
help="Path to root directory containing test files"),
config_group.add_argument("--manifest", action="store", type=abs_path, dest="manifest_path",
help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
config_group.add_argument("--run-info", action="store", type=abs_path,
help="Path to directory containing extra json files to add to run info")
config_group.add_argument("--product", action="store", choices=product_choices,
default=None, help="Browser against which to run tests")
config_group.add_argument("--browser-version", action="store",
default=None, help="Informative string detailing the browser "
"release version. This is included in the run_info data.")
config_group.add_argument("--browser-channel", action="store",
default=None, help="Informative string detailing the browser "
"release channel. This is included in the run_info data.")
config_group.add_argument("--config", action="store", type=abs_path, dest="config",
help="Path to config file")
config_group.add_argument("--install-fonts", action="store_true",
default=None,
help="Install additional system fonts on your system")
config_group.add_argument("--no-install-fonts", dest="install_fonts", action="store_false",
help="Do not install additional system fonts on your system")
config_group.add_argument("--font-dir", action="store", type=abs_path, dest="font_dir",
help="Path to local font installation directory", default=None)
config_group.add_argument("--headless", action="store_true",
help="Run browser in headless mode", default=None)
config_group.add_argument("--no-headless", action="store_false", dest="headless",
help="Don't run browser in headless mode")
config_group.add_argument("--instrument-to-file", action="store",
help="Path to write instrumentation logs to")
build_type = parser.add_mutually_exclusive_group()
build_type.add_argument("--debug-build", dest="debug", action="store_true",
default=None,
help="Build is a debug build (overrides any mozinfo file)")
build_type.add_argument("--release-build", dest="debug", action="store_false",
default=None,
help="Build is a release (overrides any mozinfo file)")
chunking_group = parser.add_argument_group("Test Chunking")
chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
help="Total number of chunks to use")
chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
help="Chunk number to run")
chunking_group.add_argument("--chunk-type", action="store", choices=["none", "hash", "dir_hash"],
default=None, help="Chunking type to use")
ssl_group = parser.add_argument_group("SSL/TLS")
ssl_group.add_argument("--ssl-type", action="store", default=None,
choices=["openssl", "pregenerated", "none"],
help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
ssl_group.add_argument("--openssl-binary", action="store",
help="Path to openssl binary", default="openssl")
ssl_group.add_argument("--certutil-binary", action="store",
help="Path to certutil binary for use with Firefox + ssl")
ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
help="Path to ca certificate when using pregenerated ssl certificates")
ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
help="Path to host private key when using pregenerated ssl certificates")
ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
help="Path to host certificate when using pregenerated ssl certificates")
gecko_group = parser.add_argument_group("Gecko-specific")
gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
help="Path to the folder containing browser prefs")
gecko_group.add_argument("--preload-browser", dest="preload_browser", action="store_true",
default=None, help="Preload a gecko instance for faster restarts")
gecko_group.add_argument("--no-preload-browser", dest="preload_browser", action="store_false",
default=None, help="Don't preload a gecko instance for faster restarts")
gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false", default=True,
help="Run tests without electrolysis preferences")
gecko_group.add_argument("--enable-webrender", dest="enable_webrender", action="store_true", default=None,
help="Enable the WebRender compositor in Gecko (defaults to disabled).")
gecko_group.add_argument("--no-enable-webrender", dest="enable_webrender", action="store_false",
help="Disable the WebRender compositor in Gecko.")
gecko_group.add_argument("--enable-fission", dest="enable_fission", action="store_true", default=None,
help="Enable fission in Gecko (defaults to disabled).")
gecko_group.add_argument("--no-enable-fission", dest="enable_fission", action="store_false",
help="Disable fission in Gecko.")
gecko_group.add_argument("--stackfix-dir", dest="stackfix_dir", action="store",
help="Path to directory containing assertion stack fixing scripts")
gecko_group.add_argument("--setpref", dest="extra_prefs", action='append',
default=[], metavar="PREF=VALUE",
help="Defines an extra user preference (overrides those in prefs_root)")
gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true", default=None,
help="Enable leak checking (enabled by default for debug builds, "
"silently ignored for opt, mobile)")
gecko_group.add_argument("--no-leak-check", dest="leak_check", action="store_false", default=None,
help="Disable leak checking")
gecko_group.add_argument("--stylo-threads", action="store", type=int, default=1,
help="Number of parallel threads to use for stylo")
gecko_group.add_argument("--reftest-internal", dest="reftest_internal", action="store_true",
default=None, help="Enable reftest runner implemented inside Marionette")
gecko_group.add_argument("--reftest-external", dest="reftest_internal", action="store_false",
help="Disable reftest runner implemented inside Marionette")
gecko_group.add_argument("--reftest-screenshot", dest="reftest_screenshot", action="store",
choices=["always", "fail", "unexpected"], default=None,
help="With --reftest-internal, when to take a screenshot")
gecko_group.add_argument("--chaos", dest="chaos_mode_flags", action="store",
nargs="?", const=0xFFFFFFFF, type=int,
help="Enable chaos mode with the specified feature flag "
"(see http://searchfox.org/mozilla-central/source/mfbt/ChaosMode.h for "
"details). If no value is supplied, all features are activated")
servo_group = parser.add_argument_group("Servo-specific")
servo_group.add_argument("--user-stylesheet",
default=[], action="append", dest="user_stylesheets",
help="Inject a user CSS stylesheet into every test.")
sauce_group = parser.add_argument_group("Sauce Labs-specific")
sauce_group.add_argument("--sauce-browser", dest="sauce_browser",
help="Sauce Labs browser name")
sauce_group.add_argument("--sauce-platform", dest="sauce_platform",
help="Sauce Labs OS platform")
sauce_group.add_argument("--sauce-version", dest="sauce_version",
help="Sauce Labs browser version")
sauce_group.add_argument("--sauce-build", dest="sauce_build",
help="Sauce Labs build identifier")
sauce_group.add_argument("--sauce-tags", dest="sauce_tags", nargs="*",
help="Sauce Labs identifying tag", default=[])
sauce_group.add_argument("--sauce-tunnel-id", dest="sauce_tunnel_id",
help="Sauce Connect tunnel identifier")
sauce_group.add_argument("--sauce-user", dest="sauce_user",
help="Sauce Labs user name")
sauce_group.add_argument("--sauce-key", dest="sauce_key",
default=os.environ.get("SAUCE_ACCESS_KEY"),
help="Sauce Labs access key")
sauce_group.add_argument("--sauce-connect-binary",
dest="sauce_connect_binary",
help="Path to Sauce Connect binary")
sauce_group.add_argument("--sauce-init-timeout", action="store",
type=int, default=30,
help="Number of seconds to wait for Sauce "
"Connect tunnel to be available before "
"aborting")
sauce_group.add_argument("--sauce-connect-arg", action="append",
default=[], dest="sauce_connect_args",
help="Command-line argument to forward to the "
"Sauce Connect binary (repeatable)")
webkit_group = parser.add_argument_group("WebKit-specific")
webkit_group.add_argument("--webkit-port", dest="webkit_port",
help="WebKit port")
parser.add_argument("test_list", nargs="*",
help="List of URLs for tests to run, or paths including tests to run. "
"(equivalent to --include)")
def screenshot_api_wrapper(formatter, api):
formatter.api = api
return formatter
commandline.fmt_options["api"] = (screenshot_api_wrapper,
"Cache API (default: %s)" % wptscreenshot.DEFAULT_API,
{"wptscreenshot"}, "store")
commandline.log_formatters["chromium"] = (chromium.ChromiumFormatter, "Chromium Layout Tests format")
commandline.log_formatters["wptreport"] = (wptreport.WptreportFormatter, "wptreport format")
commandline.log_formatters["wptscreenshot"] = (wptscreenshot.WptscreenshotFormatter, "wpt.fyi screenshots")
commandline.add_logging_group(parser)
return parser
def set_from_config(kwargs):
if kwargs["config"] is None:
config_path = config.path()
else:
config_path = kwargs["config"]
kwargs["config_path"] = config_path
kwargs["config"] = config.read(kwargs["config_path"])
keys = {"paths": [("prefs", "prefs_root", True),
("run_info", "run_info", True)],
"web-platform-tests": [("remote_url", "remote_url", False),
("branch", "branch", False),
("sync_path", "sync_path", True)],
"SSL": [("openssl_binary", "openssl_binary", True),
("certutil_binary", "certutil_binary", True),
("ca_cert_path", "ca_cert_path", True),
("host_cert_path", "host_cert_path", True),
("host_key_path", "host_key_path", True)]}
for section, values in iteritems(keys):
for config_value, kw_value, is_path in values:
if kw_value in kwargs and kwargs[kw_value] is None:
if not is_path:
new_value = kwargs["config"].get(section, config.ConfigDict({})).get(config_value)
else:
new_value = kwargs["config"].get(section, config.ConfigDict({})).get_path(config_value)
kwargs[kw_value] = new_value
kwargs["test_paths"] = get_test_paths(kwargs["config"])
if kwargs["tests_root"]:
if "/" not in kwargs["test_paths"]:
kwargs["test_paths"]["/"] = {}
kwargs["test_paths"]["/"]["tests_path"] = kwargs["tests_root"]
if kwargs["metadata_root"]:
if "/" not in kwargs["test_paths"]:
kwargs["test_paths"]["/"] = {}
kwargs["test_paths"]["/"]["metadata_path"] = kwargs["metadata_root"]
if kwargs.get("manifest_path"):
if "/" not in kwargs["test_paths"]:
kwargs["test_paths"]["/"] = {}
kwargs["test_paths"]["/"]["manifest_path"] = kwargs["manifest_path"]
kwargs["suite_name"] = kwargs["config"].get("web-platform-tests", {}).get("name", "web-platform-tests")
check_paths(kwargs)
def get_test_paths(config):
# Set up test_paths
test_paths = OrderedDict()
for section in iterkeys(config):
if section.startswith("manifest:"):
manifest_opts = config.get(section)
url_base = manifest_opts.get("url_base", "/")
test_paths[url_base] = {
"tests_path": manifest_opts.get_path("tests"),
"metadata_path": manifest_opts.get_path("metadata"),
}
if "manifest" in manifest_opts:
test_paths[url_base]["manifest_path"] = manifest_opts.get_path("manifest")
return test_paths
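# Sketch of the mapping built above (assumed wptrunner.ini shape):
#
#   [manifest:default]
#   tests = /path/to/wpt
#   metadata = /path/to/meta
#
# becomes {"/": {"tests_path": ..., "metadata_path": ...}} since url_base
# defaults to "/".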
def exe_path(name):
if name is None:
return
path = find_executable(name)
if path and os.access(path, os.X_OK):
return path
else:
return None
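# e.g. exe_path("openssl") returns something like "/usr/bin/openssl" when
# the binary is found and executable, otherwise None (illustrative).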
def check_paths(kwargs):
for test_paths in itervalues(kwargs["test_paths"]):
if not ("tests_path" in test_paths and
"metadata_path" in test_paths):
print("Fatal: must specify both a test path and metadata path")
sys.exit(1)
if "manifest_path" not in test_paths:
test_paths["manifest_path"] = os.path.join(test_paths["metadata_path"],
"MANIFEST.json")
for key, path in iteritems(test_paths):
name = key.split("_", 1)[0]
if name == "manifest":
# For the manifest we can create it later, so just check the path
# actually exists
path = os.path.dirname(path)
if not os.path.exists(path):
print("Fatal: %s path %s does not exist" % (name, path))
sys.exit(1)
if not os.path.isdir(path):
print("Fatal: %s path %s is not a directory" % (name, path))
sys.exit(1)
def check_args(kwargs):
set_from_config(kwargs)
if kwargs["product"] is None:
kwargs["product"] = "firefox"
if kwargs["manifest_update"] is None:
kwargs["manifest_update"] = True
if "sauce" in kwargs["product"]:
kwargs["pause_after_test"] = False
if kwargs["test_list"]:
if kwargs["include"] is not None:
kwargs["include"].extend(kwargs["test_list"])
else:
kwargs["include"] = kwargs["test_list"]
if kwargs["run_info"] is None:
kwargs["run_info"] = kwargs["config_path"]
if kwargs["this_chunk"] > 1:
require_arg(kwargs, "total_chunks", lambda x: x >= kwargs["this_chunk"])
if kwargs["chunk_type"] is None:
if kwargs["total_chunks"] > 1:
kwargs["chunk_type"] = "dir_hash"
else:
kwargs["chunk_type"] = "none"
if kwargs["processes"] is None:
kwargs["processes"] = 1
if kwargs["debugger"] is not None:
import mozdebug
if kwargs["debugger"] == "__default__":
kwargs["debugger"] = mozdebug.get_default_debugger_name()
debug_info = mozdebug.get_debugger_info(kwargs["debugger"],
kwargs["debugger_args"])
if debug_info and debug_info.interactive:
if kwargs["processes"] != 1:
kwargs["processes"] = 1
kwargs["no_capture_stdio"] = True
kwargs["debug_info"] = debug_info
else:
kwargs["debug_info"] = None
if kwargs["binary"] is not None:
if not os.path.exists(kwargs["binary"]):
print("Binary path %s does not exist" % kwargs["binary"], file=sys.stderr)
sys.exit(1)
if kwargs["ssl_type"] is None:
if None not in (kwargs["ca_cert_path"], kwargs["host_cert_path"], kwargs["host_key_path"]):
kwargs["ssl_type"] = "pregenerated"
elif exe_path(kwargs["openssl_binary"]) is not None:
kwargs["ssl_type"] = "openssl"
else:
kwargs["ssl_type"] = "none"
if kwargs["ssl_type"] == "pregenerated":
require_arg(kwargs, "ca_cert_path", lambda x:os.path.exists(x))
require_arg(kwargs, "host_cert_path", lambda x:os.path.exists(x))
require_arg(kwargs, "host_key_path", lambda x:os.path.exists(x))
elif kwargs["ssl_type"] == "openssl":
path = exe_path(kwargs["openssl_binary"])
if path is None:
print("openssl-binary argument missing or not a valid executable", file=sys.stderr)
sys.exit(1)
kwargs["openssl_binary"] = path
if kwargs["ssl_type"] != "none" and kwargs["product"] == "firefox" and kwargs["certutil_binary"]:
path = exe_path(kwargs["certutil_binary"])
if path is None:
print("certutil-binary argument missing or not a valid executable", file=sys.stderr)
sys.exit(1)
kwargs["certutil_binary"] = path
if kwargs['extra_prefs']:
missing = any('=' not in prefarg for prefarg in kwargs['extra_prefs'])
if missing:
print("Preferences via --setpref must be in key=value format", file=sys.stderr)
sys.exit(1)
kwargs['extra_prefs'] = [tuple(prefarg.split('=', 1)) for prefarg in
kwargs['extra_prefs']]
if kwargs["reftest_internal"] is None:
kwargs["reftest_internal"] = True
if kwargs["reftest_screenshot"] is None:
kwargs["reftest_screenshot"] = "unexpected"
if kwargs["enable_webrender"] is None:
kwargs["enable_webrender"] = False
if kwargs["preload_browser"] is None:
# Default to preloading a gecko instance if we're only running a single process
kwargs["preload_browser"] = kwargs["processes"] == 1
return kwargs
def check_args_update(kwargs):
set_from_config(kwargs)
if kwargs["product"] is None:
kwargs["product"] = "firefox"
if kwargs["patch"] is None:
kwargs["patch"] = kwargs["sync"]
for item in kwargs["run_log"]:
if os.path.isdir(item):
print("Log file %s is a directory" % item, file=sys.stderr)
sys.exit(1)
return kwargs
def create_parser_update(product_choices=None):
from mozlog.structured import commandline
from . import products
if product_choices is None:
config_data = config.load()
product_choices = products.products_enabled(config_data)
parser = argparse.ArgumentParser("web-platform-tests-update",
description="Update script for web-platform-tests tests.")
parser.add_argument("--product", action="store", choices=product_choices,
default=None, help="Browser for which metadata is being updated")
parser.add_argument("--config", action="store", type=abs_path, help="Path to config file")
parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
help="Path to the folder containing test metadata"),
parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
help="Path to web-platform-tests"),
parser.add_argument("--manifest", action="store", type=abs_path, dest="manifest_path",
help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
parser.add_argument("--sync-path", action="store", type=abs_path,
help="Path to store git checkout of web-platform-tests during update"),
parser.add_argument("--remote_url", action="store",
help="URL of web-platfrom-tests repository to sync against"),
parser.add_argument("--branch", action="store", type=abs_path,
help="Remote branch to sync against")
parser.add_argument("--rev", action="store", help="Revision to sync to")
parser.add_argument("--patch", action="store_true", dest="patch", default=None,
help="Create a VCS commit containing the changes.")
parser.add_argument("--no-patch", action="store_false", dest="patch",
help="Don't create a VCS commit containing the changes.")
parser.add_argument("--sync", dest="sync", action="store_true", default=False,
help="Sync the tests with the latest from upstream (implies --patch)")
parser.add_argument("--full", action="store_true", default=False,
help=("For all tests that are updated, remove any existing conditions and missing subtests"))
parser.add_argument("--disable-intermittent", nargs="?", action="store", const="unstable", default=None,
help=("Reason for disabling tests. When updating test results, disable tests that have "
"inconsistent results across many runs with the given reason."))
parser.add_argument("--update-intermittent", action="store_true", default=False,
help=("Update test metadata with expected intermittent statuses."))
parser.add_argument("--remove-intermittent", action="store_true", default=False,
help=("Remove obsolete intermittent statuses from expected statuses."))
parser.add_argument("--no-remove-obsolete", action="store_false", dest="remove_obsolete", default=True,
help=("Don't remove metadata files that no longer correspond to a test file"))
parser.add_argument("--no-store-state", action="store_false", dest="store_state",
help="Store state so that steps can be resumed after failure")
parser.add_argument("--continue", action="store_true",
help="Continue a previously started run of the update script")
parser.add_argument("--abort", action="store_true",
help="Clear state from a previous incomplete run of the update script")
parser.add_argument("--exclude", action="store", nargs="*",
help="List of glob-style paths to exclude when syncing tests")
parser.add_argument("--include", action="store", nargs="*",
help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
parser.add_argument("--extra-property", action="append", default=[],
help="Extra property from run_info.json to use in metadata update")
# Should make this required iff run=logfile
parser.add_argument("run_log", nargs="*", type=abs_path,
help="Log file from run of tests")
commandline.add_logging_group(parser)
return parser
def create_parser_reduce(product_choices=None):
parser = create_parser(product_choices)
parser.add_argument("target", action="store", help="Test id that is unstable")
return parser
def parse_args():
parser = create_parser()
rv = vars(parser.parse_args())
check_args(rv)
return rv
def parse_args_update():
parser = create_parser_update()
rv = vars(parser.parse_args())
check_args_update(rv)
return rv
def parse_args_reduce():
parser = create_parser_reduce()
rv = vars(parser.parse_args())
check_args(rv)
return rv
| mpl-2.0 |
lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/pip/commands/check.py | 336 | 1382 | import logging
from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions
logger = logging.getLogger(__name__)
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
name = 'check'
usage = """
%prog [options]"""
summary = 'Verify installed packages have compatible dependencies.'
def run(self, options, args):
dists = get_installed_distributions(local_only=False, skip=())
missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)
for dist in dists:
key = '%s==%s' % (dist.project_name, dist.version)
for requirement in missing_reqs_dict.get(key, []):
logger.info(
"%s %s requires %s, which is not installed.",
dist.project_name, dist.version, requirement.project_name)
for requirement, actual in incompatible_reqs_dict.get(key, []):
logger.info(
"%s %s has requirement %s, but you have %s %s.",
dist.project_name, dist.version, requirement,
actual.project_name, actual.version)
if missing_reqs_dict or incompatible_reqs_dict:
return 1
else:
logger.info("No broken requirements found.")
| mit |
jacquerie/inspire-next | inspirehep/alembic/cb9f81e8251c_add_record_metadata_indices.py | 2 | 1950 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Add record metadata indices."""
from __future__ import absolute_import, division, print_function
from alembic import op
# revision identifiers, used by Alembic.
revision = 'cb9f81e8251c'
down_revision = 'fddb3cfe7a9c'
branch_labels = ()
depends_on = None
def upgrade():
"""Upgrade database."""
op.execute(
"CREATE INDEX idxgindoctype ON records_metadata USING gin ((json -> 'document_type'))"
)
op.execute(
"CREATE INDEX idxgintitles ON records_metadata USING gin ((json -> 'titles'))"
)
op.execute(
"CREATE INDEX idxginjournaltitle ON records_metadata USING gin ((json -> 'journal_title'))"
)
op.execute(
"CREATE INDEX idxgincollections ON records_metadata USING gin ((json -> '_collections'))"
)
def downgrade():
"""Downgrade database."""
op.execute("DROP INDEX IF EXISTS idxgindoctype")
op.execute("DROP INDEX IF EXISTS idxgintitles")
op.execute("DROP INDEX IF EXISTS idxginjournaltitle")
op.execute("DROP INDEX IF EXISTS idxgincollections")
| gpl-3.0 |
potash/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 86 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
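# Soft voting (sketch): the ensemble predicts
#   argmax_k sum_i w_i * P_i(k),
# so with weights [2, 1, 2] the averaged probability per class is
# (2*p_dt + 1*p_knn + 2*p_svc) / 5.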
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
ROAND/dota2herovoices | .buildozer/venv/lib/python2.7/site-packages/distribute-0.6.49-py2.7.egg/setuptools/command/build_ext.py | 136 | 11704 | from distutils.command.build_ext import build_ext as _du_build_ext
try:
# Attempt to use Pyrex for building extensions, if available
from Pyrex.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
import os, sys
from distutils.file_util import copy_file
from setuptools.extension import Library
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
from distutils.sysconfig import _config_vars
from distutils import log
from distutils.errors import *
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
from dl import RTLD_NOW
have_rtld = True
use_stubs = True
except ImportError:
pass
def if_dl(s):
if have_rtld:
return s
return ''
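# if_dl() guards dl-specific fragments in the generated stub below, e.g.
# if_dl(", dl") evaluates to ", dl" when RTLD_NOW was importable above and
# to "" otherwise (illustrative).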
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,os.path.basename(filename))
src_filename = os.path.join(self.build_lib,filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'):
# Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
def swig_sources(self, sources, *otherargs):
# first do any Pyrex processing
sources = _build_ext.swig_sources(self, sources) or sources
# Then do any actual SWIG stuff on the remainder
return _du_build_ext.swig_sources(self, sources, *otherargs)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self,fullname)
if fullname not in self.ext_map:
return filename
ext = self.ext_map[fullname]
if isinstance(ext,Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn,libtype)
elif use_stubs and ext._links_to_dynamic:
d,fn = os.path.split(filename)
return os.path.join(d,'dl-'+fn)
else:
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext,Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = ext._links_to_dynamic = \
self.shlibs and self.links_to_dynamic(ext) or False
ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib,filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _config_vars.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
_config_vars['CCSHARED'] = " -dynamiclib"
_config_vars['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_config_vars.clear()
_config_vars.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
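        # __get__ above binds the module-level link_shared_object function to
        # this compiler instance, roughly:
        #   compiler.link_shared_object(args...) -> link_shared_object(compiler, args...)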
def get_export_symbols(self, ext):
if isinstance(ext,Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self,ext)
def build_extension(self, ext):
_compiler = self.compiler
try:
if isinstance(ext,Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self,ext)
if ext._needs_stub:
self.write_stub(
self.get_finalized_command('build_py').build_lib, ext
)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False
def get_outputs(self):
outputs = _build_ext.get_outputs(self)
optimize = self.get_finalized_command('build_py').optimize
for ext in self.extensions:
if ext._needs_stub:
base = os.path.join(self.build_lib, *ext._full_name.split('.'))
outputs.append(base+'.py')
outputs.append(base+'.pyc')
if optimize:
outputs.append(base+'.pyo')
return outputs
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file+" already exists! Please delete.")
if not self.dry_run:
f = open(stub_file,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp"+if_dl(", dl"),
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name=='nt':
# Build shared libraries
#
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
): self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
#libraries=None, library_dirs=None, runtime_library_dirs=None,
#export_symbols=None, extra_preargs=None, extra_postargs=None,
#build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir,filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
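    # Editor's note (illustrative, hedged): on a typical Unix toolchain
    # self.library_filename("x") returns 'libx.a', so an output_libname such
    # as 'build/temp/libfoo.a' splits into output_dir='build/temp' and
    # basename='foo' before create_static_lib() re-adds the platform's own
    # prefix and suffix.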
| gpl-2.0 |
simleo/openmicroscopy | components/tools/OmeroWeb/test/integration/test_links.py | 2 | 9784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests creation and deletion of links between e.g. Projects & Datasets etc.
"""
import omero
from omero.rtypes import rstring
from omeroweb.testlib import IWebTest
from omeroweb.testlib import _csrf_post_response, _get_response
from omeroweb.testlib import _csrf_delete_response
import json
from time import sleep
from django.core.urlresolvers import reverse
import pytest
class TestLinks(IWebTest):
"""
Tests creation and deletion of links between
e.g. Projects & Datasets etc.
"""
@pytest.fixture
def project(self):
"""Returns a new OMERO Project with required fields set."""
project = omero.model.ProjectI()
project.name = rstring(self.uuid())
return self.update.saveAndReturnObject(project)
@pytest.fixture
def dataset(self):
"""Returns a new OMERO Dataset with required fields set."""
dataset = omero.model.DatasetI()
dataset.name = rstring(self.uuid())
return self.update.saveAndReturnObject(dataset)
@pytest.fixture
def datasets(self):
"""Returns 2 new OMERO Datasets with required fields set."""
dataset = omero.model.DatasetI()
dataset.name = rstring("A_%s" % self.uuid())
dataset2 = omero.model.DatasetI()
dataset2.name = rstring("B_%s" % self.uuid())
return self.update.saveAndReturnArray([dataset, dataset2])
@pytest.fixture
def images(self):
image = self.new_image(name="A_%s" % self.uuid())
image2 = self.new_image(name="B_%s" % self.uuid())
return self.update.saveAndReturnArray([image, image2])
@pytest.fixture
def screens(self):
"""Returns 2 new OMERO Screens with required fields set."""
screen = omero.model.ScreenI()
screen.name = rstring("A_%s" % self.uuid())
screen2 = omero.model.ScreenI()
screen2.name = rstring("B_%s" % self.uuid())
return self.update.saveAndReturnArray([screen, screen2])
@pytest.fixture
def plates(self):
"""Returns 2 new OMERO Plates with required fields set."""
plate = omero.model.PlateI()
plate.name = rstring("A_%s" % self.uuid())
plate2 = omero.model.PlateI()
plate2.name = rstring("B_%s" % self.uuid())
return self.update.saveAndReturnArray([plate, plate2])
def test_link_project_datasets(self, project, datasets):
# Link Project to Datasets
request_url = reverse("api_links")
pid = project.id.val
dids = [d.id.val for d in datasets]
data = {
'project': {pid: {'dataset': dids}}
}
rsp = _csrf_post_response_json(self.django_client, request_url, data)
assert rsp == {"success": True}
# Check links
request_url = reverse("api_datasets")
rsp = _get_response_json(self.django_client, request_url, {'id': pid})
        # Expect both Datasets with correct ids
assert len(rsp['datasets']) == 2
assert rsp['datasets'][0]['id'] == dids[0]
assert rsp['datasets'][1]['id'] == dids[1]
def test_link_datasets_images(self, datasets, images):
# Link Datasets to Images
request_url = reverse("api_links")
dids = [d.id.val for d in datasets]
iids = [i.id.val for i in images]
        # Link first dataset to first image,
        # second dataset to both images
data = {
'dataset': {dids[0]: {'image': [iids[0]]},
dids[1]: {'image': iids}}
}
rsp = _csrf_post_response_json(self.django_client, request_url, data)
assert rsp == {"success": True}
# Check links
images_url = reverse("api_images")
# First Dataset has single image
rsp = _get_response_json(self.django_client,
images_url, {'id': dids[0]})
assert len(rsp['images']) == 1
assert rsp['images'][0]['id'] == iids[0]
# Second Dataset has both images
rsp = _get_response_json(self.django_client,
images_url, {'id': dids[1]})
assert len(rsp['images']) == 2
assert rsp['images'][0]['id'] == iids[0]
assert rsp['images'][1]['id'] == iids[1]
# Link BOTH images to first Dataset. Shouldn't get any
# Validation Exception, even though first image is already linked
data = {
'dataset': {dids[0]: {'image': iids}}
}
rsp = _csrf_post_response_json(self.django_client, request_url, data)
assert rsp == {"success": True}
# Check first Dataset now has both images
rsp = _get_response_json(self.django_client,
images_url, {'id': dids[0]})
assert len(rsp['images']) == 2
assert rsp['images'][0]['id'] == iids[0]
assert rsp['images'][1]['id'] == iids[1]
def test_link_unlink_tagset_tags(self):
"""
Tests linking of tagset to tag, then unlinking
"""
tag = self.make_tag()
tagset = self.make_tag(ns=omero.constants.metadata.NSINSIGHTTAGSET)
tagId = tag.id.val
tagsetId = tagset.id.val
links_url = reverse("api_links")
# Link tagset to tag
data = {
'tagset': {tagsetId: {'tag': [tagId]}}
}
rsp = _csrf_post_response_json(self.django_client, links_url, data)
assert rsp == {"success": True}
# Check that tag is listed under tagset...
tags_url = reverse("api_tags_and_tagged")
r = _get_response_json(self.django_client, tags_url, {'id': tagsetId})
assert len(r['tags']) == 1
assert r['tags'][0]['id'] == tagId
# Unlink first Tag from Tagset
        # the same data dict used to create the link above is used to delete it
response = _csrf_delete_response(self.django_client,
links_url,
json.dumps(data),
content_type="application/json")
response = json.loads(response.content)
assert response["success"]
        # Since the Delete is async, we need to check repeatedly for deletion
for i in range(10):
rsp = _get_response_json(self.django_client,
tags_url, {'id': tagsetId})
if len(rsp['tags']) == 0:
break
sleep(0.5)
# Check that link has been deleted
assert len(rsp['tags']) == 0
def test_unlink_screen_plate(self, screens, plates):
# Link both plates to both screens
request_url = reverse("api_links")
sids = [s.id.val for s in screens]
pids = [p.id.val for p in plates]
data = {
'screen': {sids[0]: {'plate': pids},
sids[1]: {'plate': pids}}
}
rsp = _csrf_post_response_json(self.django_client, request_url, data)
assert rsp == {"success": True}
# Confirm that first Screen linked to 2 Plates
plates_url = reverse("api_plates")
rsp = _get_response_json(self.django_client,
plates_url, {'id': sids[0]})
assert len(rsp['plates']) == 2
# Unlink first Plate from first Screen
request_url = reverse("api_links")
data = {
'screen': {sids[0]: {'plate': pids[:1]}}
}
response = _csrf_delete_response(self.django_client,
request_url,
json.dumps(data),
content_type="application/json")
# Returns remaining link from 2nd Screen to first Plate
response = json.loads(response.content)
assert response == {"success": True,
"screen": {str(sids[1]): {"plate": pids[:1]}}
}
        # Since the Delete is async, we need to check repeatedly for deletion
        # by counting plates under the screen...
plates_url = reverse("api_plates")
for i in range(10):
rsp = _get_response_json(self.django_client,
plates_url, {'id': sids[0]})
if len(rsp['plates']) == 1:
break
sleep(0.5)
# Check that link has been deleted, leaving 2nd plate under 1st screen
assert len(rsp['plates']) == 1
assert rsp['plates'][0]['id'] == pids[1]
def _get_response_json(django_client, request_url, query_string):
rsp = _get_response(django_client, request_url,
query_string, status_code=200)
return json.loads(rsp.content)
def _csrf_post_response_json(django_client, request_url, data):
rsp = _csrf_post_response(django_client,
request_url,
json.dumps(data),
content_type="application/json")
return json.loads(rsp.content)
| gpl-2.0 |
Phil9l/cosmos | code/data_structures/src/stack/reverse_stack/reverse_stack.py | 3 | 1726 | """
Part of Cosmos by OpenGenus Foundation
"""
# stack class
class Stack:
def __init__(self):
self.items = []
# check if the stack is empty
def isEmpty(self):
return self.items == []
# push item into the stack
def push(self, item):
self.items.append(item)
# pop item from the stack
def pop(self):
return self.items.pop()
    # peek at the top item of the stack without removing it
def peek(self):
return self.items[len(self.items) - 1]
# get stack size
def size(self):
return len(self.items)
# reverse stack function
def reverse(stack):
# temp list
items = []
# pop items in the stack and append to the list
# this will reverse items in the stack
while not stack.isEmpty():
items.append(stack.pop())
# push reversed item back to the stack
for item in items:
stack.push(item)
# return
return stack
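# Editor's example (a minimal sketch, not in the original file):
#
#   s = Stack()
#   for x in (1, 2, 3):
#       s.push(x)      # items: [1, 2, 3], top is 3
#   reverse(s)
#   print(s.items)     # -> [3, 2, 1], top is now 1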
if __name__ == "__main__":
# init the stack
inputStack = Stack()
print(
"Enter the item to push into the stack and press Enter (type 'rev' to reverse the stack)"
)
while True:
# get input item
inputItem = input("input item: ")
if inputItem == "rev" and inputStack.isEmpty():
# if stack is empty, return message
print("The stack is empty")
print("========== +++++ ===========")
elif inputItem == "rev":
# reverse the stack
reverseStack = reverse(inputStack)
print("reversed stack: ", reverseStack.items)
break
else:
# push item into the stack
inputStack.push(inputItem)
print("current stack:", inputStack.items)
| gpl-3.0 |
aprefontaine/TMScheduler | db/base.py | 2 | 8598 | import datetime
from .creation import DatabaseCreation
from ..utils import appid, have_appserver, on_production_server
from djangotoolbox.db.base import NonrelDatabaseFeatures, \
NonrelDatabaseOperations, NonrelDatabaseWrapper, NonrelDatabaseClient, \
NonrelDatabaseValidation, NonrelDatabaseIntrospection
import logging, os
from django.db.backends.util import format_number
def auth_func():
import getpass
return raw_input('Login via Google Account:'), getpass.getpass('Password:')
def rpc_server_factory(*args, ** kwargs):
from google.appengine.tools import appengine_rpc
kwargs['save_cookies'] = True
return appengine_rpc.HttpRpcServer(*args, ** kwargs)
def get_datastore_paths(options):
"""Returns a tuple with the path to the datastore and history file.
The datastore is stored in the same location as dev_appserver uses by
default, but the name is altered to be unique to this project so multiple
Django projects can be developed on the same machine in parallel.
Returns:
      (datastore_path, blobstore_path, history_path)
"""
from google.appengine.tools import dev_appserver_main
datastore_path = options.get('datastore_path',
dev_appserver_main.DEFAULT_ARGS['datastore_path'].replace(
'dev_appserver', 'django_%s' % appid))
blobstore_path = options.get('blobstore_path',
dev_appserver_main.DEFAULT_ARGS['blobstore_path'].replace(
'dev_appserver', 'django_%s' % appid))
history_path = options.get('history_path',
dev_appserver_main.DEFAULT_ARGS['history_path'].replace(
'dev_appserver', 'django_%s' % appid))
return datastore_path, blobstore_path, history_path
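# Editor's illustration (hedged; the actual directories come from
# dev_appserver_main.DEFAULT_ARGS): with appid == 'myproj' and stock defaults
# under /tmp, the function above would yield paths along the lines of
#   ('/tmp/django_myproj.datastore',
#    '/tmp/django_myproj.blobstore',
#    '/tmp/django_myproj.datastore.history')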
def get_test_datastore_paths(inmemory=True):
"""Returns a tuple with the path to the test datastore and history file.
If inmemory is true, (None, None) is returned to request an in-memory
datastore. If inmemory is false the path returned will be similar to the path
returned by get_datastore_paths but with a different name.
Returns:
      (datastore_path, blobstore_path, history_path)
"""
if inmemory:
return None, None, None
    datastore_path, blobstore_path, history_path = get_datastore_paths({})
datastore_path = datastore_path.replace('.datastore', '.testdatastore')
blobstore_path = blobstore_path.replace('.blobstore', '.testblobstore')
history_path = history_path.replace('.datastore', '.testdatastore')
return datastore_path, blobstore_path, history_path
def destroy_datastore(*args):
"""Destroys the appengine datastore at the specified paths."""
for path in args:
if not path:
continue
try:
os.remove(path)
except OSError, error:
if error.errno != 2:
logging.error("Failed to clear datastore: %s" % error)
class DatabaseFeatures(NonrelDatabaseFeatures):
pass
class DatabaseOperations(NonrelDatabaseOperations):
compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
DEFAULT_MAX_DIGITS = 16
def value_to_db_decimal(self, value, max_digits, decimal_places):
if value is None:
return None
sign = value < 0 and u'-' or u''
if sign:
value = abs(value)
if max_digits is None:
max_digits = self.DEFAULT_MAX_DIGITS
if decimal_places is None:
value = unicode(value)
else:
value = format_number(value, max_digits, decimal_places)
decimal_places = decimal_places or 0
n = value.find('.')
if n < 0:
n = len(value)
if n < max_digits - decimal_places:
value = u"0" * (max_digits - decimal_places - n) + value
return sign + value
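    # Editor's worked example (not part of the original class): with
    # value=Decimal('-1.5'), max_digits=5, decimal_places=2, format_number()
    # yields '1.50'; the integer part has 1 digit but
    # max_digits - decimal_places = 3, so the string is left-padded to
    # '001.50' and the sign is restored, giving '-001.50'. The fixed width
    # keeps lexicographic ordering of the stored strings consistent with
    # numeric ordering.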
def sql_flush(self, style, tables, sequences):
self.connection.flush()
return []
class DatabaseClient(NonrelDatabaseClient):
pass
class DatabaseValidation(NonrelDatabaseValidation):
pass
class DatabaseIntrospection(NonrelDatabaseIntrospection):
pass
class DatabaseWrapper(NonrelDatabaseWrapper):
def __init__(self, *args, **kwds):
super(DatabaseWrapper, self).__init__(*args, **kwds)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.validation = DatabaseValidation(self)
self.introspection = DatabaseIntrospection(self)
options = self.settings_dict
self.use_test_datastore = options.get('use_test_datastore', False)
self.test_datastore_inmemory = options.get('test_datastore_inmemory', True)
self.remote = options.get('remote', False)
if on_production_server:
self.remote = False
self.remote_app_id = options.get('remote_id', appid)
self.remote_host = options.get('remote_host', '%s.appspot.com' % self.remote_app_id)
self.remote_url = options.get('remote_url', '/remote_api')
self._setup_stubs()
def _get_paths(self):
if self.use_test_datastore:
return get_test_datastore_paths(self.test_datastore_inmemory)
else:
return get_datastore_paths(self.settings_dict)
def _setup_stubs(self):
        # If this code is being run without an appserver (e.g. via a Django
        # command-line flag) then set up a default stub environment.
if not have_appserver:
from google.appengine.tools import dev_appserver_main
args = dev_appserver_main.DEFAULT_ARGS.copy()
args['datastore_path'], args['blobstore_path'], args['history_path'] = self._get_paths()
from google.appengine.tools import dev_appserver
dev_appserver.SetupStubs(appid, **args)
# If we're supposed to set up the remote_api, do that now.
if self.remote:
self.setup_remote()
def setup_remote(self):
self.remote = True
logging.info('Setting up remote_api for "%s" at http://%s%s' %
(self.remote_app_id, self.remote_host, self.remote_url)
)
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
remote_api_stub.ConfigureRemoteDatastore(self.remote_app_id,
self.remote_url, auth_func, self.remote_host,
rpc_server_factory=rpc_server_factory)
logging.info('Now using the remote datastore for "%s" at http://%s%s' %
(self.remote_app_id, self.remote_host, self.remote_url))
def flush(self):
"""Helper function to remove the current datastore and re-open the stubs"""
if self.remote:
import random, string
code = ''.join([random.choice(string.ascii_letters) for x in range(4)])
print '\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
print "Warning! You're about to delete the *production* datastore!"
print 'Only models defined in your INSTALLED_APPS can be removed!'
print 'If you want to clear the whole datastore you have to use the ' \
'datastore viewer in the dashboard. Also, in order to delete all '\
'unneeded indexes you have to run appcfg.py vacuum_indexes.'
print 'In order to proceed you have to enter the following code:'
print code
response = raw_input('Repeat: ')
if code == response:
print 'Deleting...'
from django.db import models
from google.appengine.api import datastore as ds
for model in models.get_models():
print 'Deleting %s...' % model._meta.db_table
while True:
data = ds.Query(model._meta.db_table, keys_only=True).Get(200)
if not data:
break
ds.Delete(data)
print "Datastore flushed! Please check your dashboard's " \
'datastore viewer for any remaining entities and remove ' \
'all unneeded indexes with manage.py vacuum_indexes.'
else:
print 'Aborting'
exit()
else:
destroy_datastore(*self._get_paths())
self._setup_stubs()
| bsd-3-clause |
nhomar/odoo-mirror | addons/mail/ir_attachment.py | 378 | 5643 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import os.path
class IrAttachment(osv.Model):
""" Update partner to add a field about notification preferences """
_name = "ir.attachment"
_inherit = 'ir.attachment'
_fileext_to_type = {
'7z': 'archive',
'aac': 'audio',
'ace': 'archive',
'ai': 'vector',
'aiff': 'audio',
'apk': 'archive',
'app': 'binary',
'as': 'script',
'asf': 'video',
'ass': 'text',
'avi': 'video',
'bat': 'script',
'bin': 'binary',
'bmp': 'image',
'bzip2': 'archive',
'c': 'script',
'cab': 'archive',
'cc': 'script',
'ccd': 'disk',
'cdi': 'disk',
'cdr': 'vector',
'cer': 'certificate',
'cgm': 'vector',
'cmd': 'script',
'coffee': 'script',
'com': 'binary',
'cpp': 'script',
'crl': 'certificate',
'crt': 'certificate',
'cs': 'script',
'csr': 'certificate',
'css': 'html',
'csv': 'spreadsheet',
'cue': 'disk',
'd': 'script',
'dds': 'image',
'deb': 'archive',
'der': 'certificate',
'djvu': 'image',
'dmg': 'archive',
'dng': 'image',
'doc': 'document',
'docx': 'document',
'dvi': 'print',
'eot': 'font',
'eps': 'vector',
'exe': 'binary',
'exr': 'image',
'flac': 'audio',
'flv': 'video',
'gif': 'webimage',
'gz': 'archive',
'gzip': 'archive',
'h': 'script',
'htm': 'html',
'html': 'html',
'ico': 'image',
'icon': 'image',
'img': 'disk',
'iso': 'disk',
'jar': 'archive',
'java': 'script',
'jp2': 'image',
'jpe': 'webimage',
'jpeg': 'webimage',
'jpg': 'webimage',
'jpx': 'image',
'js': 'script',
'key': 'presentation',
'keynote': 'presentation',
'lisp': 'script',
'lz': 'archive',
'lzip': 'archive',
'm': 'script',
'm4a': 'audio',
'm4v': 'video',
'mds': 'disk',
'mdx': 'disk',
'mid': 'audio',
'midi': 'audio',
'mkv': 'video',
'mng': 'image',
'mp2': 'audio',
'mp3': 'audio',
'mp4': 'video',
'mpe': 'video',
'mpeg': 'video',
'mpg': 'video',
'nrg': 'disk',
'numbers': 'spreadsheet',
'odg': 'vector',
'odm': 'document',
'odp': 'presentation',
'ods': 'spreadsheet',
'odt': 'document',
'ogg': 'audio',
'ogm': 'video',
'otf': 'font',
'p12': 'certificate',
'pak': 'archive',
'pbm': 'image',
'pdf': 'print',
'pem': 'certificate',
'pfx': 'certificate',
'pgf': 'image',
'pgm': 'image',
'pk3': 'archive',
'pk4': 'archive',
'pl': 'script',
'png': 'webimage',
'pnm': 'image',
'ppm': 'image',
'pps': 'presentation',
'ppt': 'presentation',
'ps': 'print',
'psd': 'image',
'psp': 'image',
'py': 'script',
'r': 'script',
'ra': 'audio',
'rar': 'archive',
'rb': 'script',
'rpm': 'archive',
'rtf': 'text',
'sh': 'script',
'sub': 'disk',
'svg': 'vector',
'sxc': 'spreadsheet',
'sxd': 'vector',
'tar': 'archive',
'tga': 'image',
'tif': 'image',
'tiff': 'image',
'ttf': 'font',
'txt': 'text',
'vbs': 'script',
'vc': 'spreadsheet',
'vml': 'vector',
'wav': 'audio',
'webp': 'image',
'wma': 'audio',
'wmv': 'video',
'woff': 'font',
'xar': 'vector',
'xbm': 'image',
'xcf': 'image',
'xhtml': 'html',
'xls': 'spreadsheet',
'xlsx': 'spreadsheet',
'xml': 'html',
'zip': 'archive'
}
def get_attachment_type(self, cr, uid, ids, name, args, context=None):
result = {}
for attachment in self.browse(cr, uid, ids, context=context):
fileext = os.path.splitext(attachment.datas_fname or '')[1].lower()[1:]
result[attachment.id] = self._fileext_to_type.get(fileext, 'unknown')
return result
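    # Editor's sketch of the lookup above (filenames are hypothetical):
    #   'report.PDF'     -> splitext ext '.PDF' -> 'pdf' -> 'print'
    #   'archive.tar.gz' -> last ext 'gz'       -> 'archive'
    #   'notes'          -> ''                  -> 'unknown'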
_columns = {
'file_type_icon': fields.function(get_attachment_type, type='char', string='File Type Icon'),
'file_type': fields.related('file_type_icon', type='char'), # FIXME remove in trunk
}
| agpl-3.0 |
adamchainz/ansible | lib/ansible/plugins/shell/sh.py | 69 | 4222 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell import ShellBase
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles.
# Note: sh is the default shell plugin so this plugin may also be selected
# if the filename is not listed in any Shell plugin.
COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
    # Family of shells this plugin handles. Must match the filename without extension
SHELL_FAMILY = 'sh'
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
_SHELL_AND = '&&'
_SHELL_OR = '||'
_SHELL_SUB_LEFT = '"`'
_SHELL_SUB_RIGHT = '`"'
_SHELL_GROUP_LEFT = '('
_SHELL_GROUP_RIGHT = ')'
def checksum(self, path, python_interp):
# The following test needs to be SH-compliant. BASH-isms will
# not work if /bin/sh points to a non-BASH shell.
#
# In the following test, each condition is a check and logical
# comparison (|| or &&) that sets the rc value. Every check is run so
# the last check in the series to fail will be the rc that is
# returned.
#
# If a check fails we error before invoking the hash functions because
# hash functions may successfully take the hash of a directory on BSDs
# (UFS filesystem?) which is not what the rest of the ansible code
# expects
#
# If all of the available hashing methods fail we fail with an rc of
# 0. This logic is added to the end of the cmd at the bottom of this
# function.
# Return codes:
# checksum: success!
# 0: Unknown error
# 1: Remote file does not exist
# 2: No read permissions on the file
# 3: File is a directory
# 4: No python interpreter
# Quoting gets complex here. We're writing a python string that's
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = shlex_quote(path)
test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
csums = [
u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
]
cmd = (" %s " % self._SHELL_OR).join(csums)
cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
return cmd
| gpl-3.0 |
D4rk4/Neural-Network-Trading-Bot | pyTrader/db_access.py | 3 | 2030 | import sqlite3 # execute, fetchone, fetchall, commit
import os.path # path.isfile
timeout = 15
nonce_table = 'nonce'
output_table = 'output'
def create_nonce_db():
new_nonce_db = sqlite3.connect('auth/nonce.sqlite')
new_nonce_db.execute('CREATE TABLE IF NOT EXISTS nonce (current_nonce INTEGER)' )
new_nonce_db.close()
def db_connect(_nonce_db_path, _output_db_path):
global nonce_connect, nonce_cursor
global out_connect, out_cursor
try:
nonce_connect = sqlite3.connect(_nonce_db_path, timeout)
out_connect = sqlite3.connect(_output_db_path, timeout)
nonce_cursor = nonce_connect.cursor()
out_cursor = out_connect.cursor()
except sqlite3.ProgrammingError as e:
print(e)
def db_exists(_db):
    return os.path.isfile(_db)
def output_records_exist():
out_cursor.execute('SELECT COUNT(*) FROM ' + output_table)
data_check = out_cursor.fetchall()
if data_check[0][0] > 0:
return True
else:
return False
def output_init_record():
out_cursor.execute('SELECT rowid, order_type FROM ' + output_table + ' ORDER BY ROWID DESC LIMIT 1')
return out_cursor.fetchall()[0][0]
def get_last_output_record():
out_cursor.execute('SELECT rowid, current_price, predicted_price, order_type, err_rate FROM ' + output_table + ' ORDER BY ROWID DESC LIMIT 1')
last_record = out_cursor.fetchall()
return last_record[0]
# Returns the nonce value anticipated by BTC-e API and increments nonce value
def get_nonce():
nonce_cursor.execute('SELECT current_nonce FROM ' + nonce_table + ' ORDER BY current_nonce DESC LIMIT 1')
nonce = nonce_cursor.fetchone()[0]
nonce_cursor.execute('INSERT INTO ' + nonce_table + ' (current_nonce) VALUES(?)', [nonce + 1])
nonce_connect.commit()
return nonce
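# Editor's usage sketch (hedged: the paths are hypothetical and the nonce
# table is assumed to be seeded with one row):
#
#   create_nonce_db()
#   db_connect('auth/nonce.sqlite', 'output/output.sqlite')
#   n1 = get_nonce()   # e.g. 1
#   n2 = get_nonce()   # always n1 + 1, as the BTC-e API expects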
def adjust_nonce(_expected_nonce):
nonce_cursor.execute('INSERT INTO ' + nonce_table + ' (current_nonce) VALUES(?)', [_expected_nonce + 1])
    nonce_connect.commit()
| mit |
ahmadshahwan/cohorte-runtime | python/src/lib/python/jinja2/environment.py | 614 | 47244 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
    unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
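# Editor's note (not in the original module) on the cache-size convention
# used by create_cache()/copy_cache() above:
#   create_cache(0)   -> None          (caching disabled)
#   create_cache(-1)  -> {}            (unbounded dict cache)
#   create_cache(400) -> LRUCache(400) (bounded, LRU eviction)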
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, string_types):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
        'be a subclass of Undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
        The string marking the beginning of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
        The string marking the beginning of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
        The string marking the beginning of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
        If given and a string, this will be used as prefix for line-based
        comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
        The size of the cache. By default this is ``50``, which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
        sources may change (i.e. file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
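    # Editor's illustration (hedged, hypothetical values): for d = {'items': 1},
    # env.getitem(d, 'items') returns 1 because subscription is tried first,
    # while env.getattr(d, 'items') returns the bound dict.items method because
    # attribute lookup wins there. Templates compile dotted access through
    # getattr and subscript syntax through getitem.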
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
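    # Editor's usage sketch (assumes only the default filter/test registries):
    #   env = Environment()
    #   env.call_test('odd', 3)           # -> True
    #   env.call_filter('upper', 'abc')   # -> 'ABC'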
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
        `defer_init` is used internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
        By default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
        zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on pypy and Python 3 where pyc files are not picked up by itself and
don't give much benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
        method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
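    # Editor's sketch (hedged; DictLoader and the template text are
    # illustrative, not taken from this file):
    #   from jinja2 import Environment
    #   from jinja2.loaders import DictLoader
    #   env = Environment(loader=DictLoader({'hi.txt': 'Hi {{ name }}!'}))
    #   env.get_template('hi.txt').render(name='World')  # -> u'Hi World!'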
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
lstrip_blocks, newline_sequence, keep_trailing_newline,
frozenset(extensions), optimized, undefined, finalize, autoescape,
None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as a unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
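# Illustrative streaming sketch (the `sink` object is hypothetical):
#
#   tmpl = Template(u'{% for i in seq %}{{ i }} {% endfor %}')
#   for chunk in tmpl.generate(seq=range(3)):
#       sink.write(chunk)  # unicode pieces arrive as they are rendered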
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. By default the globals
are added to the context. If `shared` is set to `True` the data
is passed as-is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally,
converting it to a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates expression-like access to the
template that wraps the compiled expression.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
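# Illustrative usage sketch via Environment.compile_expression:
#
#   env = Environment()
#   expr = env.compile_expression('foo')
#   expr(foo=42)  # -> 42
#   expr()        # -> None (Undefined mapped to None by default)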
@implements_iterator
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
By default the output is unbuffered, which means that one unicode string
is yielded for every unbuffered instruction in the template.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
By default unicode strings are written; if you want to encode before
writing, specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, string_types):
fp = open(fp, 'w' if encoding is None else 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = get_next(self._gen)
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = get_next(generator(get_next(self._gen)))
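# Illustrative usage sketch:
#
#   stream = Template(u'...').stream()
#   stream.enable_buffering(size=5)  # each item yielded from now on
#                                    # concatenates up to five non-empty events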
def __iter__(self):
return self
def __next__(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| apache-2.0 |
mikekap/buck | third-party/py/unittest2/unittest2/result.py | 152 | 6133 | """Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from unittest2 import util
from unittest2.compatibility import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
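# Sketch of the decorator's effect (illustrative):
#
#   result = TestResult()
#   result.failfast = True
#   # A subsequent addError/addFailure/addUnexpectedSuccess now calls stop(),
#   # setting shouldStop so the runner aborts after the current test.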
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_moduleSetUpFailed = False
def __init__(self):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
self._mirrorOutput = False
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return (len(self.failures) + len(self.errors) == 0)
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
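# Note: frames whose module globals define `__unittest = True` (as this module
# does above) are treated as runner internals; _is_relevant_tb_level detects
# them and _count_relevant_tb_levels counts the user frames that precede the
# first such frame, so assertion helpers drop out of reported tracebacks.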
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
| apache-2.0 |
hugegreenbug/libgestures | include/build/ios/clean_env.py | 208 | 2258 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def Main(argv):
"""This is like 'env -i', but it uses a whitelist of env variables to allow
through to the command being run. It attempts to strip off Xcode-added
values from PATH.
"""
# Note: An attempt was made to do something like: env -i bash -lc '[command]'
# but that fails to set the things set by login (USER, etc.), so instead
# the only approach that seems to work is to have a whitelist.
env_key_whitelist = (
'HOME',
'LOGNAME',
# 'PATH' added below (but filtered).
'PWD',
'SHELL',
'TEMP',
'TMPDIR',
'USER'
)
# Need something to run.
# TODO(lliabraa): Make this output a usage string and exit (here and below).
assert(len(argv) > 0)
add_to_path = []
first_entry = argv[0]
if first_entry.startswith('ADD_TO_PATH='):
argv = argv[1:]
add_to_path = first_entry.replace('ADD_TO_PATH=', '', 1).split(':')
# Still need something to run.
assert(len(argv) > 0)
clean_env = {}
# Pull over the whitelisted keys.
for key in env_key_whitelist:
val = os.environ.get(key, None)
if not val is None:
clean_env[key] = val
# Collect the developer dir as set via Xcode, defaulting it.
dev_prefix = os.environ.get('DEVELOPER_DIR', '/Developer/')
if dev_prefix[-1:] != '/':
dev_prefix += '/'
# Now pull in PATH, but remove anything Xcode might have added.
initial_path = os.environ.get('PATH', '')
filtered_chunks = \
[x for x in initial_path.split(':') if not x.startswith(dev_prefix)]
if filtered_chunks:
clean_env['PATH'] = ':'.join(add_to_path + filtered_chunks)
# Add any KEY=VALUE args before the command to the cleaned environment.
args = argv[:]
while '=' in args[0]:
(key, val) = args[0].split('=', 1)
clean_env[key] = val
args = args[1:]
# Still need something to run.
assert(len(args) > 0)
# Off it goes...
os.execvpe(args[0], args, clean_env)
# Should never get here, so return a distinctive, non-zero status code.
return 66
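# Example invocation (paths and command are hypothetical):
#
#   clean_env.py ADD_TO_PATH=/opt/local/bin CC=clang make -C out
#
# runs `make -C out` with a scrubbed environment, CC=clang added, and
# /opt/local/bin prepended to the Xcode-filtered PATH.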
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
promptworks/horizon | openstack_dashboard/dashboards/project/loadbalancers/panel.py | 33 | 1733 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import neutron
from openstack_dashboard.dashboards.project import dashboard
LOG = logging.getLogger(__name__)
class LoadBalancer(horizon.Panel):
name = _("Load Balancers")
slug = "loadbalancers"
permissions = ('openstack.services.network',)
def allowed(self, context):
request = context['request']
if not request.user.has_perms(self.permissions):
return False
try:
if not neutron.is_service_enabled(request,
config_name='enable_lb',
ext_name='lbaas'):
return False
except Exception:
LOG.error("Call to list enabled services failed. This is likely "
"due to a problem communicating with the Neutron "
"endpoint. Load Balancers panel will not be displayed.")
return False
if not super(LoadBalancer, self).allowed(context):
return False
return True
dashboard.Project.register(LoadBalancer)
| apache-2.0 |
AlexBourassa/Generic_UI | A_Lab/Tests/ConsoleWidget.py | 4 | 1958 | # -*- coding: utf-8 -*-
"""
This class was borrowed from HyperSpyUI
https://github.com/vidartf/hyperspyUI
Created on Tue Nov 04 16:04:17 2014
@author: Vidar Tonaas Fauske
"""
try:
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
except ImportError:
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget as \
RichJupyterWidget
from IPython.qt.inprocess import QtInProcessKernelManager
from IPython.lib import guisupport
class ConsoleWidget(RichJupyterWidget):
def __init__(self, *args, **kwargs):
super(ConsoleWidget, self).__init__(*args, **kwargs)
# Create an in-process kernel
app = guisupport.get_app_qt4()
kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel()
# Set the kernel data
self.kernel = kernel_manager.kernel
self.kernel.gui = 'qt4'
kernel_client = kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
app.exit()
self.kernel_manager = kernel_manager
self.kernel_client = kernel_client
self.exit_requested.connect(stop)
def ex(self, source):
self.kernel.shell.ex(source)
def push(self, variables):
self.kernel.shell.push(variables)
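# Usage sketch (the pushed name is hypothetical): expose Python objects to
# the embedded kernel so they can be inspected interactively.
#
#   console = ConsoleWidget()
#   console.push({'scope': my_scope_object})
#   console.ex('print(scope)')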
def _execute(self, source, hidden):
""" Execute 'source'. If 'hidden', do not show any output.
See parent class :meth:`execute` docstring for full details.
Overridden copy to move the 'executing' event before it is actually executed
"""
if not hidden:
self.executing.emit(source)
msg_id = self.kernel_client.execute(source, hidden)
self._request_info['execute'][
msg_id] = self._ExecutionRequest(msg_id, 'user')
self._hidden = hidden
| gpl-2.0 |
LynxyssCZ/Flexget | flexget/plugins/filter/seen_movies.py | 9 | 2922 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from collections import defaultdict
from flexget import plugin
from flexget.event import event
from flexget.plugins.filter.seen import FilterSeen
log = logging.getLogger('seenmovies')
class FilterSeenMovies(FilterSeen):
"""
Prevents movies being downloaded twice.
Works only on entries which have a movie id (imdb, tmdb or trakt) available.
How duplicate movie detection works:
1) Remember all imdb urls from downloaded entries.
2) If stored imdb url appears again, entry is rejected.
"""
schema = {
'oneOf': [
{'type': 'string', 'enum': ['strict', 'loose']},
{
'type': 'object',
'properties': {
'scope': {'type': 'string', 'enum': ['global', 'local']},
'matching': {'type': 'string', 'enum': ['strict', 'loose']}
},
'additionalProperties': False
}
]
}
def __init__(self):
# remember and filter by these fields
self.fields = ['imdb_id', 'tmdb_id', 'trakt_movie_id']
self.keyword = 'seen_movies'
# We run last (-255) to make sure we don't reject duplicates before all the other plugins get a chance to reject.
@plugin.priority(-255)
def on_task_filter(self, task, config): # pylint: disable=W0221
if not isinstance(config, dict):
config = {'matching': config}
# Reject all entries without a movie (imdb, tmdb or trakt) id when matching is strict
if config.get('matching') == 'strict':
for entry in task.entries:
if not any(field in entry for field in self.fields):
log.info('Rejecting %s because of missing movie (imdb, tmdb or trakt) id' % entry['title'])
entry.reject('missing movie (imdb, tmdb or trakt) id, strict')
# call super
super(FilterSeenMovies, self).on_task_filter(task, config.get('scope', True))
# check that two copies of a movie have not been accepted this run
accepted_ids = defaultdict(set)
for entry in task.accepted:
for field in self.fields:
if field in entry:
if entry[field] in accepted_ids[field]:
entry.reject('already accepted %s %s once in task' % (field, entry[field]))
break
else:
accepted_ids[field].add(entry[field])
def on_task_learn(self, task, config):
if not isinstance(config, dict):
config = {'matching': config}
# call super
super(FilterSeenMovies, self).on_task_learn(task, config.get('scope', True))
@event('plugin.register')
def register_plugin():
plugin.register(FilterSeenMovies, 'seen_movies', api_ver=2)
| mit |
Regalis/browser-accelerator | browser-accelerator.py | 1 | 9987 | #!/usr/bin/python2
# coding=utf-8
#
# Copyright 2010 Patryk Jaworski <skorpion9312@gmail.com>
# License: GNU GPLv3
#
GNUPG=True;
GNUPG_MODULE=True;
import sys;
import tarfile;
import commands;
import os;
import getpass;
import shutil;
try:
import gnupg;
except ImportError, e:
GNUPG_MODULE=False;
GNUPG=False;
class browserAccelerator:
__supportedBrowsers={"chromium": ("--user-data-dir=%s", ".config/chromium"), "chromium-browser": ("--user-data-dir=%s", ".config/chromium"), "opera": ("-pd %s", ".opera")};
__browsers={};
__homeDir=None;
__configDir=None;
__gpg=None;
__config=None;
__user=None;
def __init__(self, GNUPG, GNUPG_MODULE):
self.__GNUPG=GNUPG;
self.__GNUPG_MODULE=GNUPG_MODULE;
self.printHeader();
self.__homeDir=os.getenv("HOME");
self.__user=os.getenv("USER");
if not self.__homeDir or not self.__user:
print "[E] Unable to read required environment variables: HOME or UID...";
print self.__homeDir+" "+self.__user;
sys.exit(1);
self.__configDir="%s/%s" % (self.__homeDir, ".browser-accelerator");
if len(sys.argv) < 2:
self.__printUsage();
sys.exit(1);
if sys.argv[1] == "--configure":
self.__configure();
sys.exit(0);
if sys.argv[1] == "--help":
self.__printUsage();
sys.exit(0);
self.__readConfig();
try:
if self.__config["use_gnupg"]=="True":
print "[I] Initializing GnuPG...";
self.__initGPG();
except:
pass;
if sys.argv[1] not in self.__supportedBrowsers:
print "[E] Unsupported browser: %s" % (sys.argv[1]);
self.__printUsage();
sys.exit(1);
if not os.path.exists("%s/.gnupg" % (self.__homeDir)):
print "[W] Unable to find .gnupg directory. Please configure your GnuPG first...";
self.__GNUPG=False;
if not self.__GNUPG_MODULE:
print "[W] Unable to load optional module *gnupg*...";
if not self.__GNUPG:
print "[I] Disabling encryption capabilities...";
self.__run(sys.argv[1]);
def __readConfig(self):
if not os.path.exists(self.__configDir) or not os.path.exists(self.__configDir+"/config"):
print "[I] Running configurator...";
self.__configure();
conf=open("%s/config" % (self.__configDir), "r");
lines=conf.readlines();
if len(lines) < 2:
print "[E] Error in configuration file, please check it...";
sys.exit(1);
self.__config={};
for l in lines:
e=l.find("=");
if e < 1:
print "[E] Error in configuration file, please check it...";
sys.exit(1);
name=l[0:e].strip();
value=l[(e+1):].strip();
self.__config[name]=value;
def __initGPG(self):
self.__gpg=gnupg.GPG(gnupghome="%s/.gnupg" % (self.__homeDir));
def __configure(self):
print "[I] Searching for installed browsers...";
binDirs=("/usr/bin/","/usr/local/bin/");
for browser in self.__supportedBrowsers:
for dir in binDirs:
bin="%s%s" % (dir, browser);
if os.path.exists(bin):
print "[I] Found %s in %s..." % (browser, bin);
self.__browsers[browser]=bin;
usegpg=False;
if self.__GNUPG:
usegpg=utils.getLine("Do you want to use GnuGP to encrypt/decrypt your browser profiles (yes/no)?", True, "no").lower();
if usegpg in ("no","n"):
usegpg=False;
else:
usegpg=True;
if usegpg:
self.__initGPG();
keys=self.__gpg.list_keys(True);
if len(keys) < 1:
print "[E] Unable to find any secret keys... Please configure your GnuPG first.";
sys.exit(1);
i=1;
print "[I] Select key which will be use to encrypt/decrypt...";
print "[I] Secret keys list:";
for key in keys:
print "[%d] %s (%s)" % (i, key["keyid"], key["uids"][1]);
i=i+1;
while(True):
key=utils.getLine("Enter key index (eg. 1)", True, "1");
try:
if int(key) not in range(1, i):
continue;
else:
break;
except ValueError:
continue;
if not os.path.exists(self.__configDir):
print "[I] Creating configuration directory (%s)..." % (self.__configDir);
try:
os.mkdir(self.__configDir);
except OSError, e:
print "[E] Error while creating directory: %s" % (e);
sys.exit(1);
if usegpg:
use_gnupg="True";
gnupg_key=keys[(int(key)-1)]["keyid"];
else:
use_gnupg="False";
gnupg_key="None";
config=open("%s/config" % (self.__configDir), "w");
config.write("use_gnupg=%s\n" % (use_gnupg));
config.write("gnupg_key=%s\n" % (gnupg_key));
for browser in self.__browsers:
config.write("%s=%s\n" % (browser, self.__browsers[browser]));
config.close();
print "[I] Configuration saved...";
def __run(self, browser):
print "[I] Checking %s" % (browser);
bin=None;
try:
bin=self.__config[browser];
except KeyError:
print "[E] Please add %s to your configuration file or try to run browser-accelerator with --configure option..." % (browser);
sys.exit(1);
end=bin.find(" ");
if end < 0:
end=len(bin);
if not os.path.exists(bin[0:end]):
print "[E] Unable to find your browser in %s..." % (browser);
sys.exit(1);
use_gnupg=False;
try:
if self.__config["use_gnupg"]=="True":
use_gnupg=True;
try:
key=self.__config["gnupg_key"];
if not key:
raise KeyError;
except KeyError:
print "[E] Please specify \"gnupg_key\" in your configuration file...";
os.remove(tmp);
sys.exit(1);
except KeyError:
pass;
profile="%s/%s" % (self.__homeDir, self.__supportedBrowsers[browser][1]);
if not os.path.exists(profile):
print "[E] Unable to find your browser profile in %s. Please run %s first..." % (profile, bin);
sys.exit(1);
localProfile="%s/%s.tar.bz2" % (self.__configDir, browser);
if use_gnupg:
if os.path.exists(localProfile):
print "[I] Detected old package in profile directory...";
print "[I] Using GnuPG to encrypt tarball...";
self.__gpg.encrypt_file(open(localProfile,"r"), key, output="%s.gpg" % (localProfile));
os.remove(localProfile);
print "[I] Done...";
localProfile=localProfile+".gpg";
tmp="/dev/shm/%s-%s" % (self.__user, localProfile[(localProfile.rfind("/")+1):]);
if not os.path.exists(localProfile):
if not use_gnupg:
if os.path.exists(localProfile+".gpg"):
print "[SW] Security warning!";
print "[SW] Detected encrypted profile tarball. Please run browser-accelerator with GnuPG enabled (edit your configuration file), manually decrypt your tarball *or* remove it to continue without encryption.";
sys.exit(1);
if use_gnupg:
tmp=tmp[0:-4];
print "[I] Creating profile tarball...";
base="%s" % (profile[0:profile.rfind("/")]);
utils.mktar(base, profile, tmp);
print "[I] Done...";
if use_gnupg:
print "[I] Using GnuPG to encrypt tarball...";
self.__gpg.encrypt_file(open(tmp,"r"), key, output=tmp+".gpg");
print "[I] Done...";
os.remove(tmp);
tmp=tmp+".gpg";
print "[I] Moving tarball to profile directory...";
shutil.move(tmp, localProfile);
print "[I] OK...";
print "[I] Preparing profile...";
RAMProfile="/dev/shm/%s-%s" % (self.__user, browser);
tarball=localProfile;
if use_gnupg:
password=getpass.getpass("[G] Enter your GnuPG password: ");
print "[I] Decrypting browser data...";
self.__gpg.decrypt_file(open(localProfile, "r"), output=tmp[0:-4], passphrase=password);
# TODO:
# I don't know how to catch exception (ValueError - eg. bad password) reported by gnupg thread :(
if not os.path.exists(tmp[0:-4]):
print "[E] Decrypting error...";
sys.exit(1);
tarball=tmp[0:-4];
print "[I] Unpacking tarball...";
utils.extar(RAMProfile, tarball);
os.remove(tarball);
print "[I] Done...";
RAMProfile="%s/%s" % (RAMProfile, self.__supportedBrowsers[browser][1][self.__supportedBrowsers[browser][1].rfind("/")+1:]);
cmd="%s %s" % (self.__config[browser], self.__supportedBrowsers[browser][0] % (RAMProfile));
print "[I] Executing %s" % (cmd);
try:
status = commands.getstatusoutput(cmd);
except KeyboardInterrupt:
print "[I] Exiting...";
print "[I] Done...";
browserbase=self.__supportedBrowsers[browser][1][self.__supportedBrowsers[browser][1].rfind("/")+1:];
tmpbase=RAMProfile[0:RAMProfile.find("/"+browserbase)];
print "[I] Creating tarball...";
utils.mktar(tmpbase, "%s/%s" % (tmpbase, browserbase), tarball);
shutil.rmtree(tmpbase);
if use_gnupg:
print "[I] Encrypting tarball...";
self.__gpg.encrypt_file(open(tarball,"r"), key, output=localProfile);
os.remove(tarball);
else:
shutil.move(tarball, localProfile);
print "\n"+" Done ".center(50, "=")+"\n";
def __printUsage(self):
print "Usage:";
print "\t%s [option] OR [browser]" % (sys.argv[0]);
print "\nOptions:";
print "\t--configure".ljust(20, " "),;
print "Run an interactive configurator";
print "\t--help".ljust(20, " "),;
print "Print this help message";
print "\nBrowsers:";
for browser in self.__supportedBrowsers:
print "-> %s" % (browser);
print "";
def printHeader(self):
print " Browser Accelerator ".center(50,"=");
print "= Author: Patryk Jaworski";
print "= Contact: skorpion9312@gmail.com";
print "= License: GNU GPLv3";
print " Copyright (c) by Patryk Jaworski ".center(50,"=")+"\n";
class utils:
@staticmethod
def getLine(msg, required=False, default=None):
while(True):
answer=raw_input("[G] %s [%s]: " % (msg, default));
if default is not None and not answer:
print "[I] Setting default value (%s)..." % (default);
answer=default;
if not answer and required:
print "[W] This field is required...";
continue;
return answer;
@staticmethod
def mktar(base, source, target):
os.chdir(base);
tar=tarfile.open(target, "w");
source=source[(source.find(base)+len(base)+1):];
tar.add(source);
tar.close();
@staticmethod
def extar(base, source):
if not os.path.exists(base):
try:
os.mkdir(base);
except:
print "[E] Error while creating folder...";
sys.exit(1);
os.chdir(base);
tar=tarfile.open(source, "r");
tar.extractall();
tar.close();
if __name__=="__main__":
browserAccelerator(GNUPG, GNUPG_MODULE);
| gpl-3.0 |
jnewbery/bitcoin | test/functional/p2p_leak_tx.py | 24 | 2358 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that we don't leak txs to inbound peers that we haven't yet announced to"""
from test_framework.messages import msg_getdata, CInv, MSG_TX
from test_framework.p2p import p2p_lock, P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
from test_framework.wallet import MiniWallet
class P2PNode(P2PDataStore):
def on_inv(self, msg):
pass
class P2PLeakTxTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
gen_node = self.nodes[0] # The block and tx generating node
miniwallet = MiniWallet(gen_node)
# Add enough mature utxos to the wallet, so that all txs spend confirmed coins
miniwallet.generate(1)
gen_node.generate(100)
inbound_peer = self.nodes[0].add_p2p_connection(P2PNode()) # An "attacking" inbound peer
MAX_REPEATS = 100
self.log.info("Running test up to {} times.".format(MAX_REPEATS))
for i in range(MAX_REPEATS):
self.log.info('Run repeat {}'.format(i + 1))
txid = miniwallet.send_self_transfer(from_node=gen_node)['wtxid']
want_tx = msg_getdata()
want_tx.inv.append(CInv(t=MSG_TX, h=int(txid, 16)))
with p2p_lock:
inbound_peer.last_message.pop('notfound', None)
inbound_peer.send_and_ping(want_tx)
if inbound_peer.last_message.get('notfound'):
self.log.debug('tx {} was not yet announced to us.'.format(txid))
self.log.debug("node has responded with a notfound message. End test.")
assert_equal(inbound_peer.last_message['notfound'].vec[0].hash, int(txid, 16))
with p2p_lock:
inbound_peer.last_message.pop('notfound')
break
else:
self.log.debug('tx {} was already announced to us. Try test again.'.format(txid))
assert int(txid, 16) in [inv.hash for inv in inbound_peer.last_message['inv'].inv]
if __name__ == '__main__':
P2PLeakTxTest().main()
| mit |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/crm_partner_assign/wizard/crm_channel_interested.py | 263 | 3795 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
class crm_lead_forward_to_partner(osv.TransientModel):
""" Forward info history to partners. """
_name = 'crm.lead.channel.interested'
_columns = {
'interested': fields.boolean('Interested by this lead'),
'contacted': fields.boolean('Did you contact the lead?', help="The lead has been contacted"),
'comment': fields.text('Comment', help="What are the elements that have led to this decision?", required=True),
}
_defaults = {
'interested': lambda self, cr, uid, c: c.get('interested', True),
'contacted': False,
}
def action_confirm(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
if wizard.interested and not wizard.contacted:
raise osv.except_osv(_('Error!'), _("You must contact the lead before saying that you are interested"))
lead_obj = self.pool.get('crm.lead')
lead_obj.check_access_rights(cr, uid, 'write')
if wizard.interested:
message = _('<p>I am interested by this lead.</p>')
values = {}
else:
stage = 'stage_portal_lead_recycle'
message = _('<p>I am not interested by this lead. I %scontacted the lead.</p>') % (not wizard.contacted and 'have not ' or '')
values = {'partner_assigned_id': False}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
partner_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [('id', 'child_of', user.partner_id.commercial_partner_id.id)], context=context)
lead_obj.message_unsubscribe(cr, SUPERUSER_ID, context.get('active_ids', []), partner_ids, context=None)
try:
stage_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', stage)[1]
except ValueError:
stage_id = False
if stage_id:
values.update({'stage_id': stage_id})
if wizard.comment:
message += '<p>%s</p>' % wizard.comment
for active_id in context.get('active_ids', []):
lead_obj.message_post(cr, uid, active_id, body=message, subtype="mail.mt_comment", context=context)
if values:
lead_obj.write(cr, SUPERUSER_ID, context.get('active_ids', []), values)
if wizard.interested:
for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context):
lead_obj.convert_opportunity(cr, SUPERUSER_ID, [lead.id], lead.partner_id and lead.partner_id.id or None, context=None)
return {
'type': 'ir.actions.act_window_close',
}
| agpl-3.0 |
maciekcc/tensorflow | tensorflow/contrib/linalg/python/ops/linear_operator_identity.py | 31 | 27452 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like the identity matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
__all__ = [
"LinearOperatorIdentity",
"LinearOperatorScaledIdentity",
]
class BaseLinearOperatorIdentity(linear_operator.LinearOperator):
"""Base class for Identity operators."""
def _check_num_rows_possibly_add_asserts(self):
"""Static check of init arg `num_rows`, possibly add asserts."""
# Possibly add asserts.
if self._assert_proper_shapes:
self._num_rows = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._num_rows,
0,
message="Argument num_rows must be a 0-D Tensor."),
check_ops.assert_non_negative(
self._num_rows,
message="Argument num_rows must be non-negative."),
], self._num_rows)
# Static checks.
if not self._num_rows.dtype.is_integer:
raise TypeError("Argument num_rows must be integer type. Found:"
" %s" % self._num_rows)
num_rows_static = self._num_rows_static
if num_rows_static is None:
return # Cannot do any other static checks.
if num_rows_static.ndim != 0:
raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
" %s" % num_rows_static)
if num_rows_static < 0:
raise ValueError("Argument num_rows must be non-negative. Found:"
" %s" % num_rows_static)
def _min_matrix_dim(self):
"""Minimum of domain/range dimension, if statically available, else None."""
domain_dim = self.domain_dimension.value
range_dim = self.range_dimension.value
if domain_dim is None or range_dim is None:
return None
return min(domain_dim, range_dim)
def _min_matrix_dim_tensor(self):
"""Minimum of domain/range dimension, as a tensor."""
return math_ops.reduce_min(self.shape_tensor()[-2:])
def _ones_diag(self):
"""Returns the diagonal of this operator as all ones."""
if self.shape.is_fully_defined():
d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
else:
d_shape = array_ops.concat(
[self.batch_shape_tensor(),
[self._min_matrix_dim_tensor()]], axis=0)
return array_ops.ones(shape=d_shape, dtype=self.dtype)
class LinearOperatorIdentity(BaseLinearOperatorIdentity):
"""`LinearOperator` acting like a [batch] square identity matrix.
This operator acts like a [batch] identity matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorIdentity` is initialized with `num_rows`, and optionally
`batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this
operator efficiently passes through all arguments. If `batch_shape` is
provided, broadcasting may occur, which will require making copies.
```python
# Create a 2 x 2 identity matrix.
operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32)
operator.to_dense()
==> [[1., 0.]
[0., 1.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> 0.
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor, same as x.
y = tf.random_normal(shape=[3, 2, 4])
# Note that y.shape is compatible with operator.shape because operator.shape
# is broadcast to [3, 2, 2].
# This broadcast does NOT require copying data, since we can infer that y
# will be passed through without changing shape. We are always able to infer
# this if the operator has no batch_shape.
x = operator.solve(y)
==> Shape [3, 2, 4] Tensor, same as y.
# Create a 2-batch of 2x2 identity matrices
operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2])
operator.to_dense()
==> [[[1., 0.]
[0., 1.]],
[[1., 0.]
[0., 1.]]]
# Here, even though the operator has a batch shape, the input is the same as
# the output, so x can be passed through without a copy. The operator is able
# to detect that no broadcast is necessary because both x and the operator
# have statically defined shape.
x = ... Shape [2, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, same as x
# Here the operator and x have different batch_shape, and are broadcast.
# This requires a copy, since the output is different size than the input.
x = ... Shape [1, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, equal to [x, x]
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
### Performance
If `batch_shape` initialization arg is `None`:
* `operator.matmul(x)` is `O(1)`
* `operator.solve(x)` is `O(1)`
* `operator.determinant()` is `O(1)`
If `batch_shape` initialization arg is provided, and static checks cannot
rule out the need to broadcast:
* `operator.matmul(x)` is `O(D1*...*Dd*N*R)`
* `operator.solve(x)` is `O(D1*...*Dd*N*R)`
* `operator.determinant()` is `O(B1*...*Bb)`
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
num_rows,
batch_shape=None,
dtype=None,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True,
assert_proper_shapes=False,
name="LinearOperatorIdentity"):
r"""Initialize a `LinearOperatorIdentity`.
The `LinearOperatorIdentity` is initialized with arguments defining `dtype`
and shape.
This operator is able to broadcast the leading (batch) dimensions, which
sometimes requires copying data. If `batch_shape` is `None`, the operator
can take arguments of any batch shape without copying. See examples.
Args:
num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
corresponding identity matrix.
batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
dimensions. If `None`, this operator has no leading dimensions.
dtype: Data type of the matrix that this operator represents.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
assert_proper_shapes: Python `bool`. If `False`, only perform static
checks that initialization and method arguments have proper shape.
If `True`, and static checks are inconclusive, add asserts to the graph.
name: A name for this `LinearOperator`
Raises:
ValueError: If `num_rows` is determined statically to be non-scalar, or
negative.
ValueError: If `batch_shape` is determined statically to not be 1-D, or
negative.
ValueError: If any of the following is not `True`:
`{is_self_adjoint, is_non_singular, is_positive_definite}`.
"""
dtype = dtype or dtypes.float32
self._assert_proper_shapes = assert_proper_shapes
with ops.name_scope(name):
dtype = dtypes.as_dtype(dtype)
if not is_self_adjoint:
raise ValueError("An identity operator is always self adjoint.")
if not is_non_singular:
raise ValueError("An identity operator is always non-singular.")
if not is_positive_definite:
raise ValueError("An identity operator is always positive-definite.")
if not is_square:
raise ValueError("An identity operator is always square.")
super(LinearOperatorIdentity, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
self._num_rows = linear_operator_util.shape_tensor(
num_rows, name="num_rows")
self._num_rows_static = tensor_util.constant_value(self._num_rows)
self._check_num_rows_possibly_add_asserts()
if batch_shape is None:
self._batch_shape_arg = None
else:
self._batch_shape_arg = linear_operator_util.shape_tensor(
batch_shape, name="batch_shape_arg")
self._batch_shape_static = tensor_util.constant_value(
self._batch_shape_arg)
self._check_batch_shape_possibly_add_asserts()
def _shape(self):
matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
self._num_rows_static))
if self._batch_shape_arg is None:
return matrix_shape
batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)
if self._batch_shape_arg is None:
return matrix_shape
return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _assert_positive_definite(self):
return control_flow_ops.no_op("assert_positive_definite")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _possibly_broadcast_batch_shape(self, x):
"""Return 'x', possibly after broadcasting the leading dimensions."""
# If we have no batch shape, our batch shape broadcasts with everything!
if self._batch_shape_arg is None:
return x
# Static attempt:
# If we determine that no broadcast is necessary, pass x through
# If we need a broadcast, add to an array of zeros.
#
# special_shape is the shape that, when broadcast with x's shape, will give
# the correct broadcast_shape. Note that
# We have already verified the second to last dimension of self.shape
# matches x's shape in assert_compatible_matrix_dimensions.
# Also, the final dimension of 'x' can have any shape.
# Therefore, the final two dimensions of special_shape are 1's.
special_shape = self.batch_shape.concatenate([1, 1])
bshape = array_ops.broadcast_static_shape(x.get_shape(), special_shape)
if special_shape.is_fully_defined():
# bshape.is_fully_defined iff special_shape.is_fully_defined.
if bshape == x.get_shape():
return x
# Use the built in broadcasting of addition.
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
# Dynamic broadcast:
# Always add to an array of zeros, rather than using a "cond", since a
# cond would require copying data from GPU --> CPU.
special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Note that adjoint has no effect since this matrix is self-adjoint.
x = linear_operator_util.matrix_adjoint(x) if adjoint_arg else x
if self._assert_proper_shapes:
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
x = control_flow_ops.with_dependencies([aps], x)
return self._possibly_broadcast_batch_shape(x)
def _determinant(self):
return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _log_abs_determinant(self):
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self._matmul(rhs, adjoint_arg=adjoint_arg)
def _trace(self):
# Get Tensor of all ones of same shape as self.batch_shape.
if self.batch_shape.is_fully_defined():
batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
else:
batch_of_ones = array_ops.ones(
shape=self.batch_shape_tensor(), dtype=self.dtype)
if self._min_matrix_dim() is not None:
return self._min_matrix_dim() * batch_of_ones
else:
return (math_ops.cast(self._min_matrix_dim_tensor(), self.dtype) *
batch_of_ones)
def _diag_part(self):
return self._ones_diag()
def add_to_tensor(self, mat, name="add_to_tensor"):
"""Add matrix represented by this operator to `mat`. Equiv to `I + mat`.
Args:
mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name, values=[mat]):
mat = ops.convert_to_tensor(mat, name="mat")
mat_diag = array_ops.matrix_diag_part(mat)
new_diag = 1 + mat_diag
return array_ops.matrix_set_diag(mat, new_diag)
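# Usage sketch: only the diagonal of `mat` is updated; no dense identity
# matrix is materialized.
#
#   operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32)
#   operator.add_to_tensor(tf.zeros([2, 2]))  # == [[1., 0.], [0., 1.]]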
def _check_num_rows_possibly_add_asserts(self):
"""Static check of init arg `num_rows`, possibly add asserts."""
# Possibly add asserts.
if self._assert_proper_shapes:
self._num_rows = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._num_rows,
0,
message="Argument num_rows must be a 0-D Tensor."),
check_ops.assert_non_negative(
self._num_rows,
message="Argument num_rows must be non-negative."),
], self._num_rows)
# Static checks.
if not self._num_rows.dtype.is_integer:
raise TypeError("Argument num_rows must be integer type. Found:"
" %s" % self._num_rows)
num_rows_static = self._num_rows_static
if num_rows_static is None:
return # Cannot do any other static checks.
if num_rows_static.ndim != 0:
raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
" %s" % num_rows_static)
if num_rows_static < 0:
raise ValueError("Argument num_rows must be non-negative. Found:"
" %s" % num_rows_static)
def _check_batch_shape_possibly_add_asserts(self):
"""Static check of init arg `batch_shape`, possibly add asserts."""
if self._batch_shape_arg is None:
return
# Possibly add asserts
if self._assert_proper_shapes:
self._batch_shape_arg = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._batch_shape_arg,
1,
message="Argument batch_shape must be a 1-D Tensor."),
check_ops.assert_non_negative(
self._batch_shape_arg,
message="Argument batch_shape must be non-negative."),
], self._batch_shape_arg)
# Static checks
if not self._batch_shape_arg.dtype.is_integer:
raise TypeError("Argument batch_shape must be integer type. Found:"
" %s" % self._batch_shape_arg)
if self._batch_shape_static is None:
return # Cannot do any other static checks.
if self._batch_shape_static.ndim != 1:
raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
" %s" % self._batch_shape_static)
if np.any(self._batch_shape_static < 0):
raise ValueError("Argument batch_shape must be non-negative. Found:"
"%s" % self._batch_shape_static)
class LinearOperatorScaledIdentity(BaseLinearOperatorIdentity):
"""`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`.
This operator acts like a scaled [batch] identity matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
a scaled version of the `N x N` identity matrix.
`LinearOperatorIdentity` is initialized with `num_rows`, and a `multiplier`
(a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the
`multiplier` determines the scale for each batch member.
```python
# Create a 2 x 2 scaled identity matrix.
operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)
operator.to_dense()
==> [[3., 0.]
[0., 3.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> 2 * Log[3]
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> 3 * x
y = tf.random_normal(shape=[3, 2, 4])
# Note that y.shape is compatible with operator.shape because operator.shape
# is broadcast to [3, 2, 2].
x = operator.solve(y)
==> 3 * x
# Create a 2-batch of 2x2 scaled identity matrices
operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=5.)
operator.to_dense()
==> [[[5., 0.]
[0., 5.]],
[[5., 0.]
[0., 5.]]]
x = ... Shape [2, 2, 3]
operator.matmul(x)
==> 5 * x
# Here the operator and x have different batch_shape, and are broadcast.
x = ... Shape [1, 2, 3]
operator.matmul(x)
==> 5 * x
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
### Performance
* `operator.matmul(x)` is `O(D1*...*Dd*N*R)`
* `operator.solve(x)` is `O(D1*...*Dd*N*R)`
* `operator.determinant()` is `O(D1*...*Dd)`
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
num_rows,
multiplier,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
assert_proper_shapes=False,
name="LinearOperatorScaledIdentity"):
r"""Initialize a `LinearOperatorScaledIdentity`.
The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which
determines the size of each identity matrix, and a `multiplier`,
which defines `dtype`, batch shape, and scale of each matrix.
This operator is able to broadcast the leading (batch) dimensions.
Args:
num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
corresponding identity matrix.
multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
assert_proper_shapes: Python `bool`. If `False`, only perform static
checks that initialization and method arguments have proper shape.
If `True`, and static checks are inconclusive, add asserts to the graph.
name: A name for this `LinearOperator`
Raises:
ValueError: If `num_rows` is determined statically to be non-scalar, or
negative.
"""
self._assert_proper_shapes = assert_proper_shapes
if not is_square:
raise ValueError("A ScaledIdentity operator is always square.")
with ops.name_scope(name, values=[multiplier, num_rows]):
self._multiplier = ops.convert_to_tensor(multiplier, name="multiplier")
super(LinearOperatorScaledIdentity, self).__init__(
dtype=self._multiplier.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Shape [B1,...Bb, 1, 1]
self._multiplier_matrix = array_ops.expand_dims(
array_ops.expand_dims(self.multiplier, -1), -1)
self._multiplier_matrix_conj = math_ops.conj(self._multiplier_matrix)
self._abs_multiplier = math_ops.abs(self.multiplier)
self._num_rows = linear_operator_util.shape_tensor(
num_rows, name="num_rows")
self._num_rows_static = tensor_util.constant_value(self._num_rows)
self._check_num_rows_possibly_add_asserts()
self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype)
self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows,
self.dtype.real_dtype)
def _shape(self):
matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
self._num_rows_static))
batch_shape = self.multiplier.get_shape()
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)
batch_shape = array_ops.shape(self.multiplier)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _assert_non_singular(self):
return check_ops.assert_positive(
math_ops.abs(self.multiplier), message="LinearOperator was singular")
def _assert_positive_definite(self):
return check_ops.assert_positive(
math_ops.real(self.multiplier),
message="LinearOperator was not positive definite.")
def _assert_self_adjoint(self):
imag_multiplier = math_ops.imag(self.multiplier)
return check_ops.assert_equal(
array_ops.zeros_like(imag_multiplier),
imag_multiplier,
message="LinearOperator was not self-adjoint")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
x = linear_operator_util.matrix_adjoint(x) if adjoint_arg else x
if adjoint:
matrix = self._multiplier_matrix_conj
else:
matrix = self._multiplier_matrix
if self._assert_proper_shapes:
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
x = control_flow_ops.with_dependencies([aps], x)
return x * matrix
def _determinant(self):
return self.multiplier**self._num_rows_cast_to_dtype
def _log_abs_determinant(self):
return self._num_rows_cast_to_real_dtype * math_ops.log(
self._abs_multiplier)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = linear_operator_util.matrix_adjoint(rhs) if adjoint_arg else rhs
if adjoint:
matrix = self._multiplier_matrix_conj
else:
matrix = self._multiplier_matrix
if self._assert_proper_shapes:
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs)
rhs = control_flow_ops.with_dependencies([aps], rhs)
return rhs / matrix
def _trace(self):
# Get Tensor of all ones of same shape as self.batch_shape.
if self.batch_shape.is_fully_defined():
batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
else:
batch_of_ones = array_ops.ones(
shape=self.batch_shape_tensor(), dtype=self.dtype)
if self._min_matrix_dim() is not None:
return self.multiplier * self._min_matrix_dim() * batch_of_ones
else:
return (self.multiplier * math_ops.cast(self._min_matrix_dim_tensor(),
self.dtype) * batch_of_ones)
def _diag_part(self):
return self._ones_diag() * self.multiplier[..., array_ops.newaxis]
def add_to_tensor(self, mat, name="add_to_tensor"):
"""Add matrix represented by this operator to `mat`. Equiv to `I + mat`.
Args:
mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name, values=[mat]):
# Shape [B1,...,Bb, 1]
multiplier_vector = array_ops.expand_dims(self.multiplier, -1)
# Shape [C1,...,Cc, M, M]
mat = ops.convert_to_tensor(mat, name="mat")
# Shape [C1,...,Cc, M]
mat_diag = array_ops.matrix_diag_part(mat)
# multiplier_vector broadcasts here.
new_diag = multiplier_vector + mat_diag
return array_ops.matrix_set_diag(mat, new_diag)
@property
def multiplier(self):
"""The [batch] scalar `Tensor`, `c` in `cI`."""
return self._multiplier
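# Illustrative usage sketch (not part of the original file; only names
# defined above are used). For num_rows=2 and multiplier=3. the operator
# behaves like 3 * I_2:
#
#   operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)
#   operator.matmul(x)       # == 3. * x, for x of compatible shape [2, R]
#   operator.solve(x)        # == x / 3.
#   operator.determinant()   # == 3.**2 == 9.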
| apache-2.0 |
40223219/2015cd_midterm2- | static/Brython3.1.1-20150328-091302/Lib/importlib/_bootstrap.py | 623 | 63710 | """Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will cause the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# XXX Make sure all public names have no single leading underscore and all
# others do.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
# TODO: Expose from marshal
def _w_long(x):
"""Convert a 32-bit integer to little-endian.
XXX Temporary until marshal's long functions are exposed.
"""
x = int(x)
int_bytes = []
int_bytes.append(x & 0xFF)
int_bytes.append((x >> 8) & 0xFF)
int_bytes.append((x >> 16) & 0xFF)
int_bytes.append((x >> 24) & 0xFF)
return bytearray(int_bytes)
# TODO: Expose from marshal
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
XXX Temporary until marshal's long functions are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x
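# Illustrative round trip for the two helpers above (example values only):
# a 32-bit int marshals to its four little-endian bytes and back.
#
#   _w_long(0x12345678)            # -> bytearray(b'\x78\x56\x34\x12')
#   _r_long(b'\x78\x56\x34\x12')   # -> 0x12345678
#   _r_long(_w_long(n)) == n       # for any 0 <= n < 2**32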
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
new_parts = []
for part in path_parts:
if not part:
continue
new_parts.append(part)
if part[-1] not in path_separators:
new_parts.append(path_sep)
return ''.join(new_parts[:-1]) # Drop superfluous path separator.
def _path_split(path):
"""Replacement for os.path.split()."""
for x in reversed(path):
if x in path_separators:
sep = x
break
else:
sep = path_sep
front, _, tail = path.rpartition(sep)
return front, tail
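# Illustrative behaviour of the two replacements above, assuming the posix
# separators injected by _setup() (path_sep == '/'):
#
#   _path_join('usr', 'lib', 'python')   # -> 'usr/lib/python'
#   _path_split('/usr/lib/python')       # -> ('/usr/lib', 'python')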
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _os.stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
# XXX Could also expose Modules/getpath.c:isfile()
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
# XXX Could also expose Modules/getpath.c:isdir()
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
_code_type = type(_wrap.__code__)
def new_module(name):
"""Create a new module.
The module is not entered into sys.modules.
"""
return type(_io)(name)
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
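# Example of the cycle detected above: thread 1 holds module A's lock and
# blocks acquiring B's, while thread 2 holds B's lock and blocks acquiring
# A's; following owner -> blocked-on links from self.owner leads back to me.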
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError("deadlock detected by %r" % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError("cannot release un-acquired lock")
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return "_ModuleLock(%r) at %d" % (self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError("cannot release un-acquired lock")
self.count -= 1
def __repr__(self):
return "_DummyModuleLock(%r) at %d" % (self.name, id(self))
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Should only be called with the import lock taken."""
lock = None
try:
lock = _module_locks[name]()
except KeyError:
pass
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(_):
del _module_locks[name]
_module_locks[name] = _weakref.ref(lock, cb)
return lock
def _lock_unlock_module(name):
"""Release the global import lock, and acquires then release the
module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
Should only be called with the import lock taken."""
lock = _get_module_lock(name)
_imp.release_lock()
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
"""Magic word to reject .pyc files generated by other Python versions.
It should change for each incompatible change to the bytecode.
The value of CR and LF is incorporated so if you ever read or write
a .pyc file in text mode the magic number will be wrong; also, the
Apple MPW compiler swaps their values, botching string constants.
The magic numbers must be spaced apart by at least 2 values, as the
-U interpreter flag will cause MAGIC+1 to be used. They have been
odd numbers for some time now.
There were a variety of old schemes for setting the magic number.
The current working scheme is to increment the previous value by
10.
Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
number also includes a new "magic tag", i.e. a human readable string used
to represent the magic number in __pycache__ directories. When you change
the magic number, you must also set a new unique magic tag. Generally this
can be named after the Python major version of the magic number bump, but
it can really be anything, as long as it's different than anything else
that's come before. The tags are included in the following table, starting
with Python 3.2a0.
Known values:
Python 1.5: 20121
Python 1.5.1: 20121
Python 1.5.2: 20121
Python 1.6: 50428
Python 2.0: 50823
Python 2.0.1: 50823
Python 2.1: 60202
Python 2.1.1: 60202
Python 2.1.2: 60202
Python 2.2: 60717
Python 2.3a0: 62011
Python 2.3a0: 62021
Python 2.3a0: 62011 (!)
Python 2.4a0: 62041
Python 2.4a3: 62051
Python 2.4b1: 62061
Python 2.5a0: 62071
Python 2.5a0: 62081 (ast-branch)
Python 2.5a0: 62091 (with)
Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
Python 2.5b3: 62101 (fix wrong code: for x, in ...)
Python 2.5b3: 62111 (fix wrong code: x += yield)
Python 2.5c1: 62121 (fix wrong lnotab with for loops and
storing constants that should have been removed)
Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
Python 2.6a1: 62161 (WITH_CLEANUP optimization)
Python 3000: 3000
3010 (removed UNARY_CONVERT)
3020 (added BUILD_SET)
3030 (added keyword-only parameters)
3040 (added signature annotations)
3050 (print becomes a function)
3060 (PEP 3115 metaclass syntax)
3061 (string literals become unicode)
3071 (PEP 3109 raise changes)
3081 (PEP 3137 make __file__ and __name__ unicode)
3091 (kill str8 interning)
3101 (merge from 2.6a0, see 62151)
3103 (__file__ points to source file)
Python 3.0a4: 3111 (WITH_CLEANUP optimization).
Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
change LIST_APPEND and SET_ADD, add MAP_ADD)
Python 3.1a0: 3151 (optimize conditional branches:
introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
Python 3.2a0: 3160 (add SETUP_WITH)
tag: cpython-32
Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
tag: cpython-32
Python 3.2a2 3180 (add DELETE_DEREF)
Python 3.3a0 3190 __class__ super closure changed
Python 3.3a0 3200 (__qualname__ added)
3210 (added size modulo 2**32 to the pyc header)
Python 3.3a1 3220 (changed PEP 380 implementation)
Python 3.3a4 3230 (revert changes to implicit __class__ closure)
MAGIC must change whenever the bytecode emitted by the compiler may no
longer be understood by older implementations of the eval loop (usually
due to the addition of new opcodes).
"""
_RAW_MAGIC_NUMBER = 3230 | ord('\r') << 16 | ord('\n') << 24
_MAGIC_BYTES = bytes(_RAW_MAGIC_NUMBER >> n & 0xff for n in range(0, 25, 8))
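# Layout note (derivable from the two lines above): 3230 == 0x0C9E, so the
# four little-endian bytes are b'\x9e\x0c\r\n'; the trailing '\r\n' is what
# catches the accidental text-mode reads/writes described above.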
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = _path_split(path)
base_filename, sep, _ = tail.partition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([base_filename, sep, tag, suffixes[0]])
return _path_join(head, _PYCACHE, filename)
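# Illustrative mapping (the 'cpython-33' tag is an example; the real tag
# comes from sys.implementation.cache_tag):
#
#   cache_from_source('/foo/bar/baz.py')
#   # -> '/foo/bar/__pycache__/baz.cpython-33.pyc'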
def source_from_cache(path):
"""Given the path to a .pyc./.pyo file, return the path to its .py file.
The .pyc/.pyo file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc/.pyo file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
if pycache_filename.count('.') != 2:
raise ValueError('expected only 2 dots in '
'{!r}'.format(pycache_filename))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
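# Illustrative inverse of cache_from_source (same example tag):
#
#   source_from_cache('/foo/bar/__pycache__/baz.cpython-33.pyc')
#   # -> '/foo/bar/baz.py'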
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def set_package(fxn):
"""Set __package__ on the returned module."""
def set_package_wrapper(*args, **kwargs):
module = fxn(*args, **kwargs)
if getattr(module, '__package__', None) is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
_wrap(set_package_wrapper, fxn)
return set_package_wrapper
def set_loader(fxn):
"""Set __loader__ on the returned module."""
def set_loader_wrapper(self, *args, **kwargs):
module = fxn(self, *args, **kwargs)
if not hasattr(module, '__loader__'):
module.__loader__ = self
return module
_wrap(set_loader_wrapper, fxn)
return set_loader_wrapper
def module_for_loader(fxn):
"""Decorator to handle selecting the proper module for loaders.
The decorated function is passed the module to use instead of the module
name. The module passed in to the function is either from sys.modules if
it already exists or is a new module. If the module is new, then __name__
is set to the first argument to the method, __loader__ is set to self, and
__package__ is set accordingly (if self.is_package() is defined) before the
module is passed to the decorated function (if self.is_package() does
not work for the module it will be set post-load).
If an exception is raised and the decorator created the module it is
subsequently removed from sys.modules.
The decorator assumes that the decorated function takes the module name as
the second argument.
"""
def module_for_loader_wrapper(self, fullname, *args, **kwargs):
module = sys.modules.get(fullname)
is_reload = module is not None
if not is_reload:
# This must be done before open() is called as the 'io' module
# implicitly imports 'locale' and would otherwise trigger an
# infinite loop.
module = new_module(fullname)
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes wrong)
module.__initializing__ = True
sys.modules[fullname] = module
module.__loader__ = self
try:
is_package = self.is_package(fullname)
except (ImportError, AttributeError):
pass
else:
if is_package:
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
else:
module.__initializing__ = True
try:
# If __package__ was not set above, __import__() will do it later.
return fxn(self, module, *args, **kwargs)
except:
if not is_reload:
del sys.modules[fullname]
raise
finally:
module.__initializing__ = False
_wrap(module_for_loader_wrapper, fxn)
return module_for_loader_wrapper
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
The first argument (self) must define a 'name' attribute which the second
argument is compared against. If the comparison fails then ImportError is
raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError("loader cannot handle %s" % name, name=name)
return method(self, name, *args, **kwargs)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError("{} is not a built-in module".format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError("{} is not a frozen module".format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader()."""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = "Not importing directory {}: missing __init__"
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def module_repr(cls, module):
return "<module '{}' (built-in)>".format(module.__name__)
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
"""
if path is not None:
return None
return cls if _imp.is_builtin(fullname) else None
@classmethod
@set_package
@set_loader
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
is_reload = fullname in sys.modules
try:
return _call_with_frames_removed(_imp.init_builtin, fullname)
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def module_repr(cls, m):
return "<module '{}' (frozen)>".format(m.__name__)
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module."""
return cls if _imp.is_frozen(fullname) else None
@classmethod
@set_package
@set_loader
@_requires_frozen
def load_module(cls, fullname):
"""Load a frozen module."""
is_reload = fullname in sys.modules
try:
m = _call_with_frames_removed(_imp.init_frozen, fullname)
# Let our own module_repr() method produce a suitable repr.
del m.__file__
return m
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry.
"""
REGISTRY_KEY = (
"Software\\Python\\PythonCore\\{sys_version}"
"\\Modules\\{fullname}")
REGISTRY_KEY_DEBUG = (
"Software\\Python\\PythonCore\\{sys_version}"
"\\Modules\\{fullname}\\Debug")
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except WindowsError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version=sys.version[:3])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, "")
except WindowsError:
return None
return filepath
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry."""
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_os.stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
return loader(fullname, filepath)
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def _bytes_from_bytecode(self, fullname, data, bytecode_path, source_stats):
"""Return the marshalled bytes from bytecode, verifying the magic
number, timestamp and source size along the way.
If source_stats is None then skip the timestamp check.
"""
magic = data[:4]
raw_timestamp = data[4:8]
raw_size = data[8:12]
if magic != _MAGIC_BYTES:
msg = 'bad magic number in {!r}: {!r}'.format(fullname, magic)
_verbose_message(msg)
raise ImportError(msg, name=fullname, path=bytecode_path)
elif len(raw_timestamp) != 4:
message = 'bad timestamp in {}'.format(fullname)
_verbose_message(message)
raise EOFError(message)
elif len(raw_size) != 4:
message = 'bad size in {}'.format(fullname)
_verbose_message(message)
raise EOFError(message)
if source_stats is not None:
try:
source_mtime = int(source_stats['mtime'])
except KeyError:
pass
else:
if _r_long(raw_timestamp) != source_mtime:
message = 'bytecode is stale for {}'.format(fullname)
_verbose_message(message)
raise ImportError(message, name=fullname,
path=bytecode_path)
try:
source_size = source_stats['size'] & 0xFFFFFFFF
except KeyError:
pass
else:
if _r_long(raw_size) != source_size:
raise ImportError(
"bytecode is stale for {}".format(fullname),
name=fullname, path=bytecode_path)
# Can't return the code object as errors from marshal loading need to
# propagate even when source is available.
return data[12:]
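# Illustrative layout of the bytecode file parsed above (Python 3.3 era):
#
#   bytes 0-3    magic number (_MAGIC_BYTES)
#   bytes 4-7    source mtime, little-endian (read with _r_long)
#   bytes 8-11   source size modulo 2**32, little-endian
#   bytes 12-    marshalled code object (returned above)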
@module_for_loader
def _load_module(self, module, *, sourceless=False):
"""Helper for load_module able to handle either source or sourceless
loading."""
name = module.__name__
code_object = self.get_code(name)
module.__file__ = self.get_filename(name)
if not sourceless:
try:
module.__cached__ = cache_from_source(module.__file__)
except NotImplementedError:
module.__cached__ = module.__file__
else:
module.__cached__ = module.__file__
module.__package__ = name
if self.is_package(name):
module.__path__ = [_path_split(module.__file__)[0]]
else:
module.__package__ = module.__package__.rpartition('.')[0]
module.__loader__ = self
_call_with_frames_removed(exec, code_object, module.__dict__)
return module
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
"""
raise NotImplementedError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
raise NotImplementedError
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
import tokenize
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except IOError as exc:
raise ImportError("source not available through get_data()",
name=fullname) from exc
readsource = _io.BytesIO(source_bytes).readline
try:
encoding = tokenize.detect_encoding(readsource)
except SyntaxError as exc:
raise ImportError("Failed to detect encoding",
name=fullname) from exc
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
try:
return newline_decoder.decode(source_bytes.decode(encoding[0]))
except UnicodeDecodeError as exc:
raise ImportError("Failed to decode source file",
name=fullname) from exc
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except NotImplementedError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except IOError:
pass
else:
try:
bytes_data = self._bytes_from_bytecode(fullname, data,
bytecode_path,
st)
except (ImportError, EOFError):
pass
else:
_verbose_message('{} matches {}', bytecode_path,
source_path)
found = marshal.loads(bytes_data)
if isinstance(found, _code_type):
_imp._fix_co_filename(found, source_path)
_verbose_message('code object from {}',
bytecode_path)
return found
else:
msg = "Non-code object in {}"
raise ImportError(msg.format(bytecode_path),
name=fullname, path=bytecode_path)
source_bytes = self.get_data(source_path)
code_object = _call_with_frames_removed(compile,
source_bytes, source_path, 'exec',
dont_inherit=True)
_verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
data = bytearray(_MAGIC_BYTES)
data.extend(_w_long(source_mtime))
data.extend(_w_long(len(source_bytes)))
data.extend(marshal.dumps(code_object))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
def load_module(self, fullname):
"""Concrete implementation of Loader.load_module.
Requires ExecutionLoader.get_filename and ResourceLoader.get_data to be
implemented to load source code. Use of bytecode is dictated by whether
get_code uses/writes bytecode.
"""
return self._load_module(fullname)
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
@_check_name
def load_module(self, fullname):
"""Load a module from a file."""
# Issue #14857: Avoid the zero-argument form so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _os.stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
try:
mode = _os.stat(source_path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_verbose_message('could not create {!r}: {!r}', parent, exc)
return
try:
_write_atomic(path, data, _mode)
_verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def load_module(self, fullname):
return self._load_module(fullname, sourceless=True)
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
bytes_data = self._bytes_from_bytecode(fullname, data, path, None)
found = marshal.loads(bytes_data)
if isinstance(found, _code_type):
_verbose_message('code object from {!r}', path)
return found
else:
raise ImportError("Non-code object in {}".format(path),
name=fullname, path=path)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
@_check_name
@set_package
@set_loader
def load_module(self, fullname):
"""Load an extension module."""
is_reload = fullname in sys.modules
try:
module = _call_with_frames_removed(_imp.load_dynamic,
fullname, self.path)
_verbose_message('extension module loaded from {!r}', self.path)
if self.is_package(fullname) and not hasattr(module, '__path__'):
module.__path__ = [_path_split(self.path)[0]]
return module
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
loader, new_path = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if loader is None:
self._path = new_path
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return "_NamespacePath({!r})".format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
class NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
return "<module '{}' (namespace)>".format(module.__name__)
@module_for_loader
def load_module(self, module):
"""Load a namespace module."""
_verbose_message('namespace module loaded with path {!r}', self._path)
module.__path__ = self._path
return module
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sequence of hooks for a finder for 'path'.
If 'hooks' is false then use sys.path_hooks.
"""
if not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
path = '.'
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _get_loader(cls, fullname, path):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
# We found a loader: return it immediately.
return loader, namespace_path
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
return None, namespace_path
@classmethod
def find_module(cls, fullname, path=None):
"""Find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
loader, namespace_path = cls._get_loader(fullname, path)
if loader is not None:
return loader
else:
if namespace_path:
# We found at least one namespace path. Return a
# loader which can create the namespace package.
return NamespaceLoader(fullname, namespace_path, cls._get_loader)
else:
return None
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions)."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _os.stat(self.path).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
if _path_isdir(base_path):
for suffix, loader in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return (loader(fullname, full_path), [base_path])
else:
# A namespace package, return the path if we don't also
# find a module in the next section.
is_namespace = True
# Check whether a file with a proper suffix exists.
for suffix, loader in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_verbose_message('trying {}'.format(full_path), verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return (loader(fullname, full_path), [])
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
return (None, [base_path])
return (None, [])
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path)
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = set(fn.lower() for fn in contents)
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError("only directories are supported", path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return "FileFinder(%r)" % (self.path,)
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
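# Illustrative resolutions (example names only):
#
#   _resolve_name('mod', 'pkg.sub', 1)   # -> 'pkg.sub.mod' (from . import mod)
#   _resolve_name('mod', 'pkg.sub', 2)   # -> 'pkg.mod' (from .. import mod)
#   _resolve_name('', 'pkg.sub', 1)      # -> 'pkg.sub'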
def _find_module(name, path):
"""Find a module's loader."""
if not sys.meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
for finder in sys.meta_path:
#with _ImportLockContext():
# loader = finder.find_module(name, path)
loader = finder.find_module(name, path)
if loader is not None:
# The parent import may have already imported this module.
if name not in sys.modules:
return loader
else:
return sys.modules[name].__loader__
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError("module name must be str, not {}".format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError("__package__ not set to a string")
elif package not in sys.modules:
msg = ("Parent module {!r} not loaded, cannot perform relative "
"import")
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError("Empty module name")
_ERR_MSG = 'No module named {!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
# Backwards-compatibility; be nicer to skip the dict lookup.
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {} is not a package').format(name, parent)
raise ImportError(msg, name=name)
loader = _find_module(name, path)
if loader is None:
exc = ImportError(_ERR_MSG.format(name), name=name)
# TODO(brett): switch to a proper ModuleNotFound exception in Python
# 3.4.
exc._not_found = True
raise exc
elif name not in sys.modules:
# The parent import may have already imported this module.
loader.load_module(name)
_verbose_message('import {!r} # {!r}', name, loader)
# Backwards-compatibility; be nicer to skip the dict lookup.
module = sys.modules[name]
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
# Set __package__ if the loader did not.
if getattr(module, '__package__', None) is None:
try:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
except AttributeError:
pass
# Set loader if need be.
if not hasattr(module, '__loader__'):
try:
module.__loader__ = loader
except AttributeError:
pass
return module
def _find_and_load(name, import_):
"""Find and load the module, and release the import lock."""
try:
lock = _get_module_lock(name)
finally:
_imp.release_lock()
lock.acquire()
try:
return _find_and_load_unlocked(name, import_)
finally:
lock.release()
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
_imp.acquire_lock()
if name not in sys.modules:
return _find_and_load(name, _gcd_import)
module = sys.modules[name]
if module is None:
_imp.release_lock()
message = ("import of {} halted; "
"None in sys.modules".format(name))
raise ImportError(message, name=name)
_lock_unlock_module(name)
return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
# TODO(brett): In Python 3.4, have import raise
# ModuleNotFound and catch that.
if getattr(exc, '_not_found', False):
if exc.name == from_name:
continue
raise
return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
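# Illustrative results (example globals only):
#
#   _calc___package__({'__name__': 'pkg.mod'})               # -> 'pkg'
#   _calc___package__({'__name__': 'pkg', '__path__': []})   # -> 'pkg'
#   _calc___package__({'__package__': 'pkg'})                # -> 'pkg' (already set)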
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"""Import a module.
The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
if level == 0:
module = _gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = _gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return _gcd_import(name.partition('.')[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition('.')[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
else:
return _handle_fromlist(module, fromlist, _gcd_import)
def _setup(sys_module, _imp_module):
"""Setup importlib by importing needed built-in modules and injecting them
into the global namespace.
As sys is needed for sys.modules access and _imp is needed to load built-in
modules, those two modules must be explicitly passed in.
"""
global _imp, sys, BYTECODE_SUFFIXES
_imp = _imp_module
sys = sys_module
if sys.flags.optimize:
BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES
else:
BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES
module_type = type(sys)
for name, module in sys.modules.items():
if isinstance(module, module_type):
if not hasattr(module, '__loader__'):
if name in sys.builtin_module_names:
module.__loader__ = BuiltinImporter
#fix me brython
#elif _imp.is_frozen(name):
# module.__loader__ = FrozenImporter
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins'): #, 'marshal'):
if builtin_name not in sys.modules:
builtin_module = BuiltinImporter.load_module(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
os_details = ('posix', ['/']), ('nt', ['\\', '/']), ('os2', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = BuiltinImporter.load_module(builtin_os)
# TODO: rip out os2 code after 3.3 is released as per PEP 11
if builtin_os == 'os2' and 'EMX GCC' in sys.version:
path_sep = path_separators[1]
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
try:
thread_module = BuiltinImporter.load_module('_thread')
except ImportError:
# Python was built without threads
thread_module = None
weakref_module = BuiltinImporter.load_module('_weakref')
if builtin_os == 'nt':
winreg_module = BuiltinImporter.load_module('winreg')
setattr(self_module, '_winreg', winreg_module)
setattr(self_module, '_os', os_module)
setattr(self_module, '_thread', thread_module)
setattr(self_module, '_weakref', weakref_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', set(path_separators))
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(sys_module, _imp_module):
"""Install importlib as the implementation of import."""
_setup(sys_module, _imp_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
if _os.__name__ == 'nt':
sys.meta_path.append(WindowsRegistryFinder)
sys.meta_path.append(PathFinder)
| agpl-3.0 |
tfiedor/perun | perun/collect/memory/parsing.py | 1 | 5708 | """This module provides methods for parsing raw memory data"""
import collections
import re
from decimal import Decimal
import perun.profile.convert as convert
import perun.collect.memory.syscalls as syscalls
__author__ = "Radim Podola"
PATTERN_WORD = re.compile(r"(\w+|[?])")
PATTERN_TIME = re.compile(r"\d+([,.]\d*)?|[,.]\d+")
PATTERN_HEXADECIMAL = re.compile(r"0x[0-9a-fA-F]+")
PATTERN_INT = re.compile(r"\d+")
UID_RESOURCE_MAP = collections.defaultdict(int)
def parse_stack(stack):
""" Parse stack information of one allocation
:param list stack: list of raw stack data
:returns list: list of formatted structures representing stack trace of one allocation
"""
data = []
for call in stack:
call_data = {}
# parsing name of function,
# it's the first word in the call record
func = PATTERN_WORD.search(call).group()
# demangling name of function
func = syscalls.demangle(func)
call_data.update({'function': func})
# parsing instruction pointer,
# it's the first hexadecimal number in the call record
instruction_pointer = PATTERN_HEXADECIMAL.search(call).group()
# getting information of instruction pointer,
# the source file and line number in the source file
ip_info = syscalls.address_to_line(instruction_pointer)
if ip_info[0] in ["?", "??"]:
ip_info[0] = "unreachable"
if ip_info[1] in ["?", "??"]:
ip_info[1] = 0
else:
ip_info[1] = PATTERN_INT.search(ip_info[1]).group()
call_data.update({'source': ip_info[0]})
call_data.update({'line': int(ip_info[1])})
data.append(call_data)
return data
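# Illustrative call (hypothetical raw record of the form
# "<function> <instruction pointer> <offset>", as split in parse_log below):
#
#   parse_stack(["malloc 0x7f3c2a40 0x2e"])
#   # -> [{'function': 'malloc', 'source': ..., 'line': ...}]
#   # where source/line come from the addr2line cache built in parse_log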
def parse_allocation_location(trace):
""" Parse the location of user's allocation from stack trace
:param list trace: list representing stack call trace
:returns dict: first user's call to allocation
"""
for call in trace or []:
source = call['source']
if source != "unreachable":
return call
return {}
def parse_resources(allocation):
""" Parse resources of one allocation
:param list allocation: list of raw allocation data
:returns structure: formatted structure representing resources of one allocation
"""
data = {}
# parsing amount of allocated memory,
# it's the first number on the second line
amount = PATTERN_INT.search(allocation[1]).group()
data.update({'amount': int(amount)})
# parsing allocate function,
# it's the first word on the second line
allocator = PATTERN_WORD.search(allocation[1]).group()
data.update({'subtype': allocator})
# parsing address of allocated memory,
# it's the second number on the second line
address = PATTERN_INT.findall(allocation[1])[1]
data.update({'address': int(address)})
# parse the stack at the moment of allocation
# to get its trace
trace = parse_stack(allocation[2:])
data.update({'trace': trace})
# parsed data is of the 'memory' type
data.update({'type': 'memory'})
# parse the call trace to get the first user call
# to an allocation function
data.update({'uid': parse_allocation_location(trace)})
# update the resource number
flattened_uid = convert.flatten(data['uid'])
UID_RESOURCE_MAP[flattened_uid] += 1
data.update({'allocation_order': UID_RESOURCE_MAP[flattened_uid]})
return data
def parse_log(filename, executable, snapshots_interval):
""" Parse raw data in the log file
:param string filename: name of the log file
:param Executable executable: profiled binary
:param int snapshots_interval: interval of snapshots [s]
:returns structure: formatted structure representing section "snapshots" and "global"
in memory profile
"""
interval = snapshots_interval
with open(filename) as logfile:
log = logfile.read()
# allocations are split by an empty line
log = log.split('\n\n')
# Check that there is an EXIT record, i.e. that the memory log is not malformed
if log.pop().strip().find('EXIT') == -1:
raise ValueError('malformed memory log: missing EXIT record')
allocations = []
for item in log:
allocations.append(item.splitlines())
# Collect names and addresses for demangling and addr2line collective call
names, ips = set(), set()
for allocation in allocations:
for resource in allocation[2:]:
name, instruction_pointer, offset = resource.split(' ')
names.add(name)
ips.add((instruction_pointer, offset))
# Build caches for demangle and addr2line for further calls
syscalls.build_demangle_cache(names)
syscalls.build_address_to_line_cache(ips, executable.cmd)
snapshots = []
data = {}
data.update({'time': '{0:f}'.format(interval)})
data.update({'resources': []})
for allocation in allocations:
# parsing the timestamp,
# it's the only number on the first line
time_string = allocation[0]
# in some locales the timestamp uses ',' instead of '.' as the decimal separator
if time_string.find(',') > 0:
time_string = time_string.replace(',', '.')
time = Decimal(PATTERN_TIME.search(time_string).group())
while time > interval:
snapshots.append(data)
interval += snapshots_interval
data = {}
data.update({'resources': []})
data.update({'time': '{0:f}'.format(interval)})
# parse the resources of this allocation
# using parse_resources()
data['resources'].append(parse_resources(allocation))
if data:
snapshots.append(data)
return {'snapshots': snapshots, 'global': {'resources': []}}
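# Editor's usage sketch (file name and interval are illustrative):
#
#   profile = parse_log('memory.log', executable, 1)
#   for snapshot in profile['snapshots']:
#       print(snapshot['time'], len(snapshot['resources']))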
| gpl-3.0 |
msiedlarek/grpc | src/python/grpcio/grpc/framework/interfaces/face/utilities.py | 40 | 6871 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for RPC Framework's Face interface."""
import collections
# stream is referenced from specification in this module.
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import stream # pylint: disable=unused-import
from grpc.framework.interfaces.face import face
class _MethodImplementation(
face.MethodImplementation,
collections.namedtuple(
'_MethodImplementation',
['cardinality', 'style', 'unary_unary_inline', 'unary_stream_inline',
'stream_unary_inline', 'stream_stream_inline', 'unary_unary_event',
'unary_stream_event', 'stream_unary_event', 'stream_stream_event',])):
pass
def unary_unary_inline(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-unary RPC method as a callable value
that takes a request value and an face.ServicerContext object and
returns a response value.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.UNARY_UNARY, style.Service.INLINE, behavior,
None, None, None, None, None, None, None)
def unary_stream_inline(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-stream RPC method as a callable
value that takes a request value and an face.ServicerContext object and
returns an iterator of response values.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.UNARY_STREAM, style.Service.INLINE, None,
behavior, None, None, None, None, None, None)
def stream_unary_inline(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-unary RPC method as a callable
value that takes an iterator of request values and an
face.ServicerContext object and returns a response value.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.STREAM_UNARY, style.Service.INLINE, None, None,
behavior, None, None, None, None, None)
def stream_stream_inline(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-stream RPC method as a callable
value that takes an iterator of request values and an
face.ServicerContext object and returns an iterator of response values.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.STREAM_STREAM, style.Service.INLINE, None, None,
None, behavior, None, None, None, None)
def unary_unary_event(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-unary RPC method as a callable
value that takes a request value, a response callback to which to pass
the response value of the RPC, and an face.ServicerContext.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.UNARY_UNARY, style.Service.EVENT, None, None,
None, None, behavior, None, None, None)
def unary_stream_event(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a unary-stream RPC method as a callable
value that takes a request value, a stream.Consumer to which to pass the
the response values of the RPC, and an face.ServicerContext.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.UNARY_STREAM, style.Service.EVENT, None, None,
None, None, None, behavior, None, None)
def stream_unary_event(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-unary RPC method as a callable
value that takes a response callback to which to pass the response value
of the RPC and an face.ServicerContext and returns a stream.Consumer to
which the request values of the RPC should be passed.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.STREAM_UNARY, style.Service.EVENT, None, None,
None, None, None, None, behavior, None)
def stream_stream_event(behavior):
"""Creates an face.MethodImplementation for the given behavior.
Args:
behavior: The implementation of a stream-stream RPC method as a callable
value that takes a stream.Consumer to which to pass the response values
of the RPC and an face.ServicerContext and returns a stream.Consumer to
which the request values of the RPC should be passed.
Returns:
An face.MethodImplementation derived from the given behavior.
"""
return _MethodImplementation(
cardinality.Cardinality.STREAM_STREAM, style.Service.EVENT, None, None,
None, None, None, None, None, behavior)
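# Editor's usage sketch (the behavior below is hypothetical; the framework
# supplies the face.ServicerContext argument at call time):
#
#   def echo(request, context):
#       return request
#
#   implementation = unary_unary_inline(echo)
#   assert implementation.cardinality is cardinality.Cardinality.UNARY_UNARY
#   assert implementation.style is style.Service.INLINE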
| bsd-3-clause |
jscatena88/snapchat_republisher | bin/updateStory.py | 1 | 1855 | #!/usr/bin/env python
"""Adds whitelisted Friends and Sends Snaps to Story
Usage:
updateStory.py -u <username> [-p <password> -d <tmpdir> -sv] WHITELIST
Options:
-h Show usage
-u=<username> Username
-p=<password> Password (optional, will prompt if omitted)
-d=<tmpdir> Where to save the snaps [default: ./]
-s Save the snaps permanently in tmpdir
-v Verbose
"""
import os.path
import sys
from getpass import getpass
from docopt import docopt
from snapchat_republisher import sendSnapToStory, addFriends
from pysnap import Snapchat
def main():
arguments = docopt(__doc__)
username = arguments['-u']
if arguments['-p'] is None:
password = getpass('Password:')
else:
password = arguments['-p']
path = arguments['-d']
save = arguments['-s']
verbose = arguments['-v']
whiteListFile = arguments['WHITELIST']
if not os.path.isdir(path):
print('No such directory: {0}'.format(path))
sys.exit(1)
s = Snapchat()
if verbose:
print('Attempting to log in as {0}.'.format(username))
if not s.login(username, password).get('logged'):
print('Invalid username or password')
sys.exit(1)
if verbose:
print('Attempting to open whitelist file at {0}.'.format(whiteListFile))
with open(whiteListFile, 'r') as f:
whitelist = [line.rstrip() for line in f]
if verbose:
print('Successfully read whitelist and extracted {0} lines. Attempting to handle friends.'.format(len(whitelist)))
#sys.exit(0)
addFriends(s,whitelist,verbose)
for snap in s.get_snaps():
if verbose:
print('Working with snap')
sendSnapToStory(s,snap,path,save,verbose)
sys.exit(0)
if __name__ == '__main__':
main()
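# Example invocation (illustrative; the whitelist file is assumed to hold one
# username per line, matching the parsing above):
#
#   ./updateStory.py -u alice -d /tmp/snaps -v whitelist.txt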
| gpl-2.0 |
maysara/pandora_image | pandora/event/migrations/0001_initial.py | 1 | 20453 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Event'
db.create_table('event_event', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('defined', self.gf('django.db.models.fields.BooleanField')(default=False)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='events', null=True, to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True)),
('name_sort', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, db_index=True)),
('name_find', self.gf('django.db.models.fields.TextField')(default='')),
('wikipediaId', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('alternativeNames', self.gf('ox.django.fields.TupleField')(default=[])),
('start', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('startTime', self.gf('django.db.models.fields.BigIntegerField')(default=None, null=True)),
('end', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('endTime', self.gf('django.db.models.fields.BigIntegerField')(default=None, null=True)),
('duration', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('durationTime', self.gf('django.db.models.fields.BigIntegerField')(default=None, null=True)),
('type', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('matches', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('event', ['Event'])
# Adding M2M table for field items on 'Event'
db.create_table('event_event_items', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm['event.event'], null=False)),
('item', models.ForeignKey(orm['item.item'], null=False))
))
db.create_unique('event_event_items', ['event_id', 'item_id'])
# Adding M2M table for field annotations on 'Event'
db.create_table('event_event_annotations', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm['event.event'], null=False)),
('annotation', models.ForeignKey(orm['annotation.annotation'], null=False))
))
db.create_unique('event_event_annotations', ['event_id', 'annotation_id'])
def backwards(self, orm):
# Deleting model 'Event'
db.delete_table('event_event')
# Removing M2M table for field items on 'Event'
db.delete_table('event_event_items')
# Removing M2M table for field annotations on 'Event'
db.delete_table('event_event_annotations')
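# Editor's note: with South installed, this migration would typically be
# applied or reverted with the standard commands (app and migration names
# taken from this file; the exact invocation depends on the project setup):
#
#   ./manage.py migrate event 0001_initial
#   ./manage.py migrate event zero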
models = {
'annotation.annotation': {
'Meta': {'object_name': 'Annotation'},
'clip': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'annotations'", 'null': 'True', 'to': "orm['clip.Clip']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.FloatField', [], {'default': '-1', 'db_index': 'True'}),
'findvalue': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'annotations'", 'to': "orm['item.Item']"}),
'layer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'public_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}),
'sortvalue': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.FloatField', [], {'default': '-1', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'clip.clip': {
'Meta': {'unique_together': "(('item', 'start', 'end'),)", 'object_name': 'Clip'},
'aspect_ratio': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'end': ('django.db.models.fields.FloatField', [], {'default': '-1'}),
'findvalue': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_index': 'True'}),
'hue': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clips'", 'to': "orm['item.Item']"}),
'lightness': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'saturation': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'sort': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'matching_clips'", 'to': "orm['item.ItemSort']"}),
'sortvalue': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'start': ('django.db.models.fields.FloatField', [], {'default': '-1', 'db_index': 'True'}),
'subtitles': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'volume': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'event.event': {
'Meta': {'object_name': 'Event'},
'alternativeNames': ('ox.django.fields.TupleField', [], {'default': '[]'}),
'annotations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'events'", 'blank': 'True', 'to': "orm['annotation.Annotation']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'defined': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'durationTime': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'end': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'endTime': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'events'", 'blank': 'True', 'to': "orm['item.Item']"}),
'matches': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'name_find': ('django.db.models.fields.TextField', [], {'default': "''"}),
'name_sort': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'start': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'startTime': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'null': 'True', 'to': "orm['auth.User']"}),
'wikipediaId': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'})
},
'item.item': {
'Meta': {'object_name': 'Item'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('ox.django.fields.DictField', [], {'default': '{}'}),
'external_data': ('ox.django.fields.DictField', [], {'default': '{}'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'items'", 'blank': 'True', 'to': "orm['auth.Group']"}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itemId': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'blank': 'True'}),
'json': ('ox.django.fields.DictField', [], {'default': '{}'}),
'level': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''"}),
'oxdbId': ('django.db.models.fields.CharField', [], {'max_length': '42', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'poster': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'blank': 'True'}),
'poster_frame': ('django.db.models.fields.FloatField', [], {'default': '-1'}),
'poster_height': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'poster_source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'poster_width': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rendered': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'stream_aspect': ('django.db.models.fields.FloatField', [], {'default': '1.3333333333333333'}),
'stream_info': ('ox.django.fields.DictField', [], {'default': '{}'}),
'torrent': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '1000', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'null': 'True', 'to': "orm['auth.User']"})
},
'item.itemsort': {
'Meta': {'object_name': 'ItemSort'},
'accessed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'aspectratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'bitrate': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'budget': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'cinematographer': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'cutsperminute': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'director': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'duration': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'genre': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'gross': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'hue': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sort'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['item.Item']"}),
'itemId': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'lightness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'numberofactors': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'numberofcuts': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'numberoffiles': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parts': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'pixels': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'producer': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'profit': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'random': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'releasedate': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'resolution': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rightslevel': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'runtime': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'saturation': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'timesaccessed': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'volume': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'words': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'wordsperminute': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'writer': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'db_index': 'True'})
}
}
complete_apps = ['event']
| gpl-3.0 |
biospi/seamass-windeps | src/boost_1_57_0/libs/python/test/test_pointer_adoption.py | 46 | 1708 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from test_pointer_adoption_ext import *
>>> num_a_instances()
0
>>> a = create('dynamically allocated')
>>> num_a_instances()
1
>>> a.content()
'dynamically allocated'
>>> innards = a.get_inner()
>>> innards.change('with an exposed reference')
>>> a.content()
'with an exposed reference'
# The a instance should be kept alive...
>>> a = None
>>> num_a_instances()
1
# ...until we're done with its innards
>>> innards = None
>>> num_a_instances()
0
>>> b = B()
>>> a = create('another')
>>> b.a_content()
'empty'
>>> innards = b.adopt(a);
>>> b.a_content()
'another'
>>> num_a_instances()
1
>>> del a # innards and b are both holding a reference
>>> num_a_instances()
1
>>> innards.change('yet another')
>>> b.a_content()
'yet another'
>>> del innards
>>> num_a_instances() # b still owns a reference to a
1
>>> del b
>>> num_a_instances()
0
Test call policies for constructors here
>>> a = create('second a')
>>> num_a_instances()
1
>>> b = B(a)
>>> num_a_instances()
1
>>> a.content()
'second a'
>>> del a
>>> num_a_instances()
1
>>> b.a_content()
'second a'
>>> del b
>>> num_a_instances()
0
>>> assert as_A(create('dynalloc')) is not None
>>> base = Base()
>>> assert as_A(base) is None
"""
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| apache-2.0 |
jackkiej/SickRage | lib/pgi/cffilib/gir/gicallableinfo.py | 19 | 2324 | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from .._compat import xrange
from ._ffi import ffi, lib
from .gibaseinfo import GIBaseInfo, GIInfoType
from .gitypeinfo import GITypeInfo
from .giarginfo import GITransfer, GIArgInfo
class GICallableInfo(GIBaseInfo):
def get_return_type(self):
return GITypeInfo(lib.g_callable_info_get_return_type(self._ptr))
@property
def caller_owns(self):
return GITransfer(lib.g_callable_info_get_caller_owns(self._ptr))
@property
def can_throw_gerror(self):
return lib.g_callable_info_can_throw_gerror(self._ptr)
@property
def may_return_null(self):
return lib.g_callable_info_may_return_null(self._ptr)
def get_return_attribute(self, name):
res = lib.g_callable_info_get_return_attribute(self._ptr, name)
if res:
return ffi.string(res)
def iterate_return_attributes(self):
it = ffi.new("GIAttributeIter*")
name = ffi.new("char**")
value = ffi.new("char**")
while lib.g_callable_info_iterate_return_attributes(
self._ptr, it, name, value):
yield (ffi.string(name[0]), ffi.string(value[0]))
@property
def n_args(self):
return lib.g_callable_info_get_n_args(self._ptr)
def get_arg(self, n):
return GIArgInfo(lib.g_callable_info_get_arg(self._ptr, n))
def get_args(self):
for i in xrange(self.n_args):
yield self.get_arg(i)
def load_arg(self, n, args):
# warning: lifetime bound to this info
info = ffi.new("GIArgInfo*")
lib.g_callable_info_load_arg(self._ptr, n, info)
return GIArgInfo(info)
def load_return_type(self):
# warning: lifetime bound to this info
info = ffi.new("GITypeInfo*")
lib.g_callable_info_load_return_type(self._ptr, info)
return GITypeInfo(info)
@property
def skip_return(self):
return lib.g_callable_info_skip_return(self._ptr)
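# Editor's usage sketch (hypothetical; callable_info would come from a
# repository lookup elsewhere in this package):
#
#   for arg in callable_info.get_args():
#       print(arg)                      # GIArgInfo wrappers
#   return_type = callable_info.get_return_type()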
@GIBaseInfo._register(GIInfoType.CALLBACK)
class GICallbackInfo(GICallableInfo):
pass
| gpl-3.0 |
shawnferry/ansible | lib/ansible/plugins/connections/paramiko_ssh.py | 4 | 14549 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ---
# The paramiko transport is provided because many distributions, in particular EL6 and before
# do not support ControlPersist in their SSH implementations. This is needed on the Ansible
# control machine to be reasonably efficient with connections. Thus paramiko is faster
# for most users on these platforms. Users with ControlPersist capability can consider
# using -c ssh or configuring the transport in ansible.cfg.
import warnings
import os
import pipes
import socket
import random
import logging
import tempfile
import traceback
import fcntl
import re
import sys
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
from ansible.utils.path import makedirs_safe
AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""
# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
HAVE_PARAMIKO=False
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
import paramiko
HAVE_PARAMIKO=True
logging.getLogger("paramiko").setLevel(logging.WARNING)
except ImportError:
pass
class MyAddPolicy(object):
"""
Based on AutoAddPolicy in paramiko so we can determine when keys are added
and also prompt for input.
Policy for automatically adding the hostname and new host key to the
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
def __init__(self, new_stdin):
self._new_stdin = new_stdin
def missing_host_key(self, client, hostname, key):
if C.HOST_KEY_CHECKING:
# FIXME: need to fix lock file stuff
#fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
#fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
old_stdin = sys.stdin
sys.stdin = self._new_stdin
# clear out any premature input on sys.stdin
tcflush(sys.stdin, TCIFLUSH)
fingerprint = hexlify(key.get_fingerprint())
ktype = key.get_name()
inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
sys.stdin = old_stdin
if inp not in ['yes','y','']:
# FIXME: lock file stuff
#fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
#fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
raise AnsibleError("host connection rejected by user")
# FIXME: lock file stuff
#fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
#fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
key._added_by_ansible_this_time = True
# existing implementation below:
client._host_keys.add(hostname, key.get_name(), key)
# host keys are actually saved in close() function below
# in order to control ordering.
# keep connection objects on a per host basis to avoid repeated attempts to reconnect
SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}
class Connection(ConnectionBase):
''' SSH based connections with Paramiko '''
@property
def transport(self):
''' used to identify this connection object from other classes '''
return 'paramiko'
def _cache_key(self):
return "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user)
def _connect(self):
cache_key = self._cache_key()
if cache_key in SSH_CONNECTION_CACHE:
self.ssh = SSH_CONNECTION_CACHE[cache_key]
else:
self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
return self
def _connect_uncached(self):
''' activates the connection object '''
if not HAVE_PARAMIKO:
raise AnsibleError("paramiko is not installed")
port = self._connection_info.port or 22
self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr)
ssh = paramiko.SSHClient()
self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
if C.HOST_KEY_CHECKING:
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin))
allow_agent = True
if self._connection_info.password is not None:
allow_agent = False
try:
key_filename = None
if self._connection_info.private_key_file:
key_filename = os.path.expanduser(self._connection_info.private_key_file)
ssh.connect(
self._connection_info.remote_addr,
username=self._connection_info.remote_user,
allow_agent=allow_agent,
look_for_keys=True,
key_filename=key_filename,
password=self._connection_info.password,
timeout=self._connection_info.timeout,
port=port,
)
except Exception as e:
msg = str(e)
if "PID check failed" in msg:
raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
elif "Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
self._connection_info.remote_user, self._connection_info.remote_addr, port, msg)
raise AnsibleConnectionFailure(msg)
else:
raise AnsibleConnectionFailure(msg)
return ssh
def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
bufsize = 4096
try:
self.ssh.get_transport().set_keepalive(5)
chan = self.ssh.get_transport().open_session()
except Exception as e:
msg = "Failed to open session"
if len(str(e)) > 0:
msg += ": %s" % str(e)
raise AnsibleConnectionFailure(msg)
# sudo usually requires a PTY (cf. requiretty option), therefore
# we give it one by default (pty=True in ansble.cfg), and we try
# to initialise from the calling environment
if C.PARAMIKO_PTY:
chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr)
no_prompt_out = ''
no_prompt_err = ''
become_output = ''
try:
chan.exec_command(cmd)
if self._connection_info.become_pass:
# editor's fix: success_key and prompt are assumed to be populated on the
# connection info by make_become_cmd(); the original code referenced bare
# success_key/prompt names and a removed v1 utils.su_prompts helper, which
# would raise NameError. The prompt check also covers su-style prompts.
success_key = self._connection_info.success_key
prompt = self._connection_info.prompt
while True:
if success_key in become_output or \
(prompt and become_output.endswith(prompt)):
break
chunk = chan.recv(bufsize)
if not chunk:
if 'unknown user' in become_output:
raise AnsibleError(
'user %s does not exist' % self._connection_info.become_user)
else:
raise AnsibleError('ssh connection ' +
'closed waiting for password prompt')
become_output += chunk
if success_key not in become_output:
if self._connection_info.become:
chan.sendall(self._connection_info.become_pass + '\n')
else:
no_prompt_out += become_output
no_prompt_err += become_output
except socket.timeout:
raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
stdout = ''.join(chan.makefile('rb', bufsize))
stderr = ''.join(chan.makefile_stderr('rb', bufsize))
return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
try:
self.sftp = self.ssh.open_sftp()
except Exception as e:
raise AnsibleError("failed to open a SFTP connection (%s)" % e)
try:
self.sftp.put(in_path, out_path)
except IOError:
raise AnsibleError("failed to transfer file to %s" % out_path)
def _connect_sftp(self):
cache_key = "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user)
if cache_key in SFTP_CONNECTION_CACHE:
return SFTP_CONNECTION_CACHE[cache_key]
else:
result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
return result
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
try:
self.sftp = self._connect_sftp()
except Exception as e:
raise AnsibleError("failed to open a SFTP connection (%s)", e)
try:
self.sftp.get(in_path, out_path)
except IOError:
raise AnsibleError("failed to transfer file from %s" % in_path)
def _any_keys_added(self):
for hostname, keys in self.ssh._host_keys.iteritems():
for keytype, key in keys.iteritems():
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
return True
return False
def _save_ssh_host_keys(self, filename):
'''
not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
don't complain about it :)
'''
if not self._any_keys_added():
return False
path = os.path.expanduser("~/.ssh")
makedirs_safe(path)
f = open(filename, 'w')
for hostname, keys in self.ssh._host_keys.iteritems():
for keytype, key in keys.iteritems():
# was f.write
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if not added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
for hostname, keys in self.ssh._host_keys.iteritems():
for keytype, key in keys.iteritems():
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
f.close()
def close(self):
''' terminate the connection '''
cache_key = self._cache_key()
SSH_CONNECTION_CACHE.pop(cache_key, None)
SFTP_CONNECTION_CACHE.pop(cache_key, None)
if getattr(self, 'sftp', None) is not None:
self.sftp.close()
if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():
# add any new SSH host keys -- warning -- this could be slow
lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
dirname = os.path.dirname(self.keyfile)
makedirs_safe(dirname)
KEY_LOCK = open(lockfile, 'w')
fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
try:
# just in case any were added recently
self.ssh.load_system_host_keys()
self.ssh._host_keys.update(self.ssh._system_host_keys)
# gather information about the current key file, so
# we can ensure the new file has the correct mode/owner
key_dir = os.path.dirname(self.keyfile)
key_stat = os.stat(self.keyfile)
# Save the new keys to a temporary file and move it into place
# rather than rewriting the file. We set delete=False because
# the file will be moved into place rather than cleaned up.
tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
os.chmod(tmp_keyfile.name, key_stat.st_mode & 0o7777)
os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)
self._save_ssh_host_keys(tmp_keyfile.name)
tmp_keyfile.close()
os.rename(tmp_keyfile.name, self.keyfile)
except:
# unable to save keys, including scenario when key was invalid
# and caught earlier
traceback.print_exc()
pass
fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
self.ssh.close()
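# Editor's usage sketch (heavily simplified and hypothetical; in practice
# Ansible constructs connections through its connection loader, not by hand):
#
#   conn = Connection(connection_info, new_stdin)._connect()
#   rc, _, out, err = conn.exec_command('echo hello', tmp_path=None)
#   conn.close()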
| gpl-3.0 |
dschwen/libmesh | contrib/eigen/3.2.9/scripts/relicense.py | 315 | 2368 | # This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <mierle@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: mierle@gmail.com (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
import os
import sys
exclusions = set(['relicense.py'])
def update(text):
if text.find(lgpl3_header) == -1:
return text, False
return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
for basename in files:
if basename in exclusions:
print 'SKIPPED', filename
continue
filename = os.path.join(root, basename)
fo = file(filename)
text = fo.read()
fo.close()
text, updated = update(text)
if updated:
fo = file(filename, "w")
fo.write(text)
fo.close()
print 'UPDATED', filename
else:
print ' ', filename
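# Example invocation (illustrative): scans a tree and swaps the LGPL3 header
# for the MPL2 header in place:
#
#   python relicense.py path/to/eigen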
| lgpl-2.1 |
luckielordie/conan | conans/test/integration/options_test.py | 1 | 7636 | import unittest
from conans.test.utils.tools import TestClient
from conans.paths import CONANINFO
from conans.util.files import load
import os
class OptionsTest(unittest.TestCase):
def general_scope_options_test_package_test(self):
client = TestClient()
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
options = {"shared": ["1", "2"]}
def configure(self):
self.output.info("BUILD SHARED: %s" % self.options.shared)
"""
test = """from conans import ConanFile
class Pkg(ConanFile):
def test(self):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@user/testing -o *:shared=1")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 1", client.out)
client.run("create . Pkg/0.1@user/testing -o shared=2")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 2", client.out)
# With test_package
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test})
client.run("create . Pkg/0.1@user/testing -o *:shared=1")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 1", client.out)
client.run("create . Pkg/0.1@user/testing -o Pkg:shared=2")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 2", client.out)
error = client.run("create . Pkg/0.1@user/testing -o shared=1", ignore_error=True)
self.assertTrue(error)
self.assertIn("'options.shared' doesn't exist", client.out)
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile}, clean_first=True)
client.run("create . Pkg/0.1@user/testing -o *:shared=True")
self.assertIn("Pkg/0.1@user/testing: Calling build()", client.out)
error = client.run("create . Pkg/0.1@user/testing -o shared=False", ignore_error=True)
self.assertTrue(error)
self.assertIn("'options.shared' doesn't exist", client.out)
# With test_package
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test})
client.run("create . Pkg/0.1@user/testing -o *:shared=True")
self.assertIn("Pkg/0.1@user/testing: Calling build()", client.out)
self.assertIn("Pkg/0.1@user/testing (test package): Running build()", client.out)
def general_scope_priorities_test(self):
client = TestClient()
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
options = {"shared": ["1", "2", "3"]}
def configure(self):
self.output.info("BUILD SHARED: %s" % self.options.shared)
"""
test = """from conans import ConanFile
class Pkg(ConanFile):
def test(self):
pass
"""
client.save({"conanfile.py": conanfile})
# Consumer has priority
client.run("create . Pkg/0.1@user/testing -o *:shared=1 -o shared=2")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 2", client.out)
# Consumer has priority over pattern, even if the pattern specifies the package name
client.run("create . Pkg/0.1@user/testing -o *:shared=1 -o Pkg:shared=2 -o shared=3")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 3", client.out)
# With test_package
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test})
# Sorted (longest, alphabetical) patterns, have priority
client.run("create . Pkg/0.1@user/testing -o *:shared=1 -o Pkg:shared=2")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 2", client.out)
client.run("create . Pkg/0.1@user/testing -o Pk*:shared=2 -o P*:shared=1")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 2", client.out)
client.run("create . Pkg/0.1@user/testing -o Pk*:shared=2 -o P*:shared=1")
self.assertIn("Pkg/0.1@user/testing: BUILD SHARED: 2", client.out)
def parsing_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class EqualerrorConan(ConanFile):
name = "equal"
version = "1.0.0"
options = {"opt": "ANY"}
default_options = ("opt=b=c",)
def build(self):
self.output.warn("OPTION %s" % self.options.opt)
'''
client.save({"conanfile.py": conanfile})
client.run("export . user/testing")
conanfile = '''
[requires]
equal/1.0.0@user/testing
[options]
equal:opt=a=b
'''
client.save({"conanfile.txt": conanfile}, clean_first=True)
client.run("install . --build=missing")
self.assertIn("OPTION a=b", client.user_io.out)
def basic_caching_test(self):
client = TestClient()
zlib = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "zlib"
version = "0.1"
options = {"shared": [True, False]}
default_options= "shared=False"
'''
client.save({"conanfile.py": zlib})
client.run("export . lasote/testing")
project = """[requires]
zlib/0.1@lasote/testing
"""
client.save({"conanfile.txt": project}, clean_first=True)
client.run("install . -o zlib:shared=True --build=missing")
self.assertIn("zlib/0.1@lasote/testing:2a623e3082a38f90cd2c3d12081161412de331b0",
client.user_io.out)
conaninfo = load(os.path.join(client.current_folder, CONANINFO))
self.assertIn("zlib:shared=True", conaninfo)
# Options not cached anymore
client.run("install . --build=missing")
self.assertIn("zlib/0.1@lasote/testing:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9",
client.user_io.out)
conaninfo = load(os.path.join(client.current_folder, CONANINFO))
self.assertNotIn("zlib:shared=True", conaninfo)
def general_scope_options_test(self):
# https://github.com/conan-io/conan/issues/2538
client = TestClient()
conanfile_libA = """from conans import ConanFile
class LibA(ConanFile):
options = {"shared": [True, False]}
def configure(self):
self.output.info("shared=%s" % self.options.shared)
"""
client.save({"conanfile.py": conanfile_libA})
client.run("create . libA/0.1@danimtb/testing -o *:shared=True")
self.assertIn("libA/0.1@danimtb/testing: shared=True", client.out)
conanfile_libB = """from conans import ConanFile
class LibB(ConanFile):
options = {"shared": [True, False]}
requires = "libA/0.1@danimtb/testing"
def configure(self):
self.options["*"].shared = self.options.shared
self.output.info("shared=%s" % self.options.shared)
"""
for without_configure_line in [True, False]:
if without_configure_line:
conanfile = conanfile_libB.replace(
" self.options[\"*\"].shared = self.options.shared", "")
client.save({"conanfile.py": conanfile})
# Test info
client.run("info . -o *:shared=True", ignore_error=True)
self.assertIn("PROJECT: shared=True", client.out)
self.assertIn("libA/0.1@danimtb/testing: shared=True", client.out)
# Test create
client.run("create . libB/0.1@danimtb/testing -o *:shared=True")
self.assertIn("libB/0.1@danimtb/testing: shared=True", client.out)
self.assertIn("libA/0.1@danimtb/testing: shared=True", client.out)
# Test install
client.run("install . -o *:shared=True")
self.assertIn("PROJECT: shared=True", client.out)
self.assertIn("libA/0.1@danimtb/testing: shared=True", client.out)
| mit |
hujiajie/chromium-crosswalk | chrome/common/extensions/docs/server2/chained_compiled_file_system.py | 79 | 4205 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiled_file_system import CompiledFileSystem
from docs_server_utils import StringIdentity
from file_system import FileNotFoundError
from future import Future
from path_util import ToDirectory
class ChainedCompiledFileSystem(object):
'''A CompiledFileSystem implementation that fetches data from a chain of
possible FileSystems. The chain consists of some number of FileSystems which
may have cached data for their CompiledFileSystem instances (injected on
Factory construction) + the main FileSystem (injected at Creation time).
The expected configuration is that the main FileSystem is a PatchedFileSystem
and the chain the FileSystem which it patches, but with file systems
constructed via the HostFileSystemIterator the main FileSystems could be
anything.
This slightly unusual configuration is primarily needed to avoid re-compiling
data for PatchedFileSystems, which are very similar to the FileSystem which
they patch. Re-compiling data is expensive and a waste of memory resources.
ChainedCompiledFileSystem shares the data.
'''
class Factory(CompiledFileSystem.Factory):
def __init__(self, file_system_chain, object_store):
self._file_system_chain = file_system_chain
self._object_store = object_store
def Create(self, file_system, populate_function, cls, category=None):
return ChainedCompiledFileSystem(
# Chain of CompiledFileSystem instances.
tuple(CompiledFileSystem.Factory(self._object_store).Create(
fs, populate_function, cls, category=category)
for fs in [file_system] + self._file_system_chain),
# Identity, as computed by all file systems.
StringIdentity(*(fs.GetIdentity() for fs in self._file_system_chain)))
def __init__(self, compiled_fs_chain, identity):
'''|compiled_fs_chain| is a list of tuples (compiled_fs, file_system).
'''
assert len(compiled_fs_chain) > 0
self._compiled_fs_chain = compiled_fs_chain
self._identity = identity
def GetFromFile(self, path, skip_not_found=False):
return self._GetImpl(
path,
lambda cfs: cfs.GetFromFile(path, skip_not_found=skip_not_found),
lambda cfs: cfs._GetFileVersionFromCache(path))
def GetFromFileListing(self, path):
path = ToDirectory(path)
return self._GetImpl(
path,
lambda compiled_fs: compiled_fs.GetFromFileListing(path),
lambda compiled_fs: compiled_fs._GetFileListingVersionFromCache(path))
def _GetImpl(self, path, reader, version_getter):
# Strategy: Get the current version of |path| in main FileSystem, then run
# through |_compiled_fs_chain| in *reverse* to find the "oldest" FileSystem
# with an up-to-date version of that file.
#
# Obviously, if files have been added in the main FileSystem then none of
# the older FileSystems will be able to find it.
read_and_version_futures = [(reader(fs), version_getter(fs), fs)
for fs in self._compiled_fs_chain]
def resolve():
try:
# The first file system contains both files of a newer version and
# files shared with other compiled file systems. We are going to try
# each compiled file system in the reverse order and return the data
# when version matches. Data cached in other compiled file system will
# be reused whenever possible so that we don't need to recompile things
# that are not changed across these file systems.
first_version = read_and_version_futures[0][1].Get()
for (read_future,
version_future,
compiled_fs) in reversed(read_and_version_futures):
if version_future.Get() == first_version:
return read_future.Get()
except FileNotFoundError:
pass
# Try an arbitrary operation again to generate a realistic stack trace.
return read_and_version_futures[0][0].Get()
return Future(callback=resolve)
def GetIdentity(self):
return self._identity
| bsd-3-clause |
esotericnomen/concordance | concordance.py | 1 | 5106 | #! /usr/bin/env python
# coding: utf-8
"""
##############################################################################
# Description : This module reads the I/P document and prints #
# # Meaningful unique words #
# # Count of occurrences in the document and #
# # Number of senses #
# Author : Rajkumar Ramasamy [rraj.be@gmail.com] #
##############################################################################
@ This program is free software: you can redistribute it and/or modify @
@ it under the terms of the GNU General Public License as published by @
@ the Free Software Foundation, either version 3 of the License, or @
@ (at your option) any later version. @
@ @
@ This program is distributed in the hope that it will be useful, @
@ but WITHOUT ANY WARRANTY; without even the implied warranty of @
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the @
@ GNU General Public License for more details. @
@ @
@ You should have received a copy of the GNU General Public License @
@ along with this program. If not, see <http://www.gnu.org/licenses/>. @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
"""
import sys
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
def print_summary():
print "This module reads the I/P document and prints \n \
# Meaningful unique words \n \
    # Count of occurrences in the document and \n \
# Number of senses "
print "Usage : \n \
$ " + sys.argv[0] +" input_file sort_mode \n\n \
input_file : Input text file which has to be analyzed\n \
sort_mode : 1 : Sort Lexicographically \n \
    : 2 : Sort by number of occurrences \n \
    : 3 : Sort by number of senses \n \
"
"""
Current stop words :
~~~~~~~~~~~~~~~~~~~~
a about above after again against ago all am an and any are as at be because
been before being below between both but by call can come could day did do
does doing don down during each few find first for from further get go had
has have having he her here hers herself him himself his how i if in into is
it its itself just like long look made make many may me more most my myself
no nor not now number of off oil on once one only or other our ours ourselves
out over own part people s said same see she should so some such t than that
the their theirs them themselves then there these they this those through time
to too two under until up use very was water way we were what when where which
while who whom why will with word would write you your yours yourself
yourselves
"""
def add_word(word):
print word
# Main Function
if __name__ == "__main__":
# Input arguments check
if(len(sys.argv) != 3):
print_summary()
sys.exit()
# Variables declaration
yes_print = 1
disable=0
wordList = dict()
concordList = []
greList = []
sort_mode = int(sys.argv[2])
l = WordNetLemmatizer()
    # Read stop words into a set for exact-match lookups (substring matching
    # against the raw file contents would wrongly drop words such as 'gain',
    # which occurs inside the stop word 'against')
    f_sw = open('stopwords.txt','r')
    stopwords = set(f_sw.read().split())
# Read GRE words
f_grew = open('grewords_4k.txt','r')
#f_grew = open('grewords_8k.txt','r')
grewords = f_grew.read()
for word in grewords.split():
greList.append(word)
# Print summary of the module
#print_summary()
# Read the document into string
f = open(sys.argv[1],'r')
text = f.read()
#print text
# Split individual words in sentences
for word in text.split():
#remove punctuation, numbers, and newlines
word = word.translate(None,"0123456789,<>./?;:'\"{[]}\\=+_()*&^%$#@!~`’—")
# Convert all strings into lowercase
word = word.lower()
word = l.lemmatize(word)
        # Add the word to the dictionary of counts
        if word in wordList:
            wordList[word] = wordList[word] + 1
        else:
            wordList[word] = 1
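    # Illustrative normalization of one token (hypothetical input): the raw
    # token '"Houses,"' is stripped to 'Houses', lowercased to 'houses' and
    # lemmatized to 'house' before its count is incremented.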
    # Keep a word only if it has a meaning in the WordNet DB and is not a stop word
    for rword in wordList:
        wlen = len(wn.synsets(rword))
        if rword not in stopwords and wlen != 0:
if rword in greList:
entity=(rword,wordList[rword],wlen)
concordList.append(entity)
    if yes_print:
        # sort_mode 1: lexicographically, 2: by frequency within the
        # document, 3: by number of senses in the WordNet DB
        if sort_mode in (1, 2, 3):
            sorted_list = sorted(concordList, key=lambda entry: entry[sort_mode - 1])
            for entry in sorted_list:
                print "%20s %2d %2d" % entry
if disable:
for word in grewords.split():
wlen = len(wn.synsets(word))
            if wlen != 0:
rword = l.lemmatize(word)
print rword
| gpl-3.0 |
remb0/CouchPotatoServer | libs/git/repository.py | 109 | 20401 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import Sequence
import re
import os
import subprocess
import sys
from . import branch
from . import tag
from . import commit
from . import config
from .files import ModifiedFile
from . import ref
from . import ref_container
from . import remotes
from .utils import quote_for_shell
from .utils import CommandString as CMD
#exceptions
from .exceptions import CannotFindRepository
from .exceptions import GitException
from .exceptions import GitCommandFailedException
from .exceptions import MergeConflict
from .exceptions import NonexistentRefException
BRANCH_ALIAS_MARKER = ' -> '
class Repository(ref_container.RefContainer):
_git_command = None
def setCommand(self, command):
self._git_command = command
############################# internal methods #############################
_loggingEnabled = False
def _getWorkingDirectory(self):
return '.'
def _logGitCommand(self, command, cwd):
if self._loggingEnabled:
print >> sys.stderr, ">>", command
def enableLogging(self):
self._loggingEnabled = True
def disableLogging(self):
self._loggingEnabled = False
def _executeGitCommand(self, command, cwd = None):
if cwd is None:
cwd = self._getWorkingDirectory()
command = '%s %s' % (self._git_command, str(command))
self._logGitCommand(command, cwd)
returned = subprocess.Popen(command,
shell = True,
cwd = cwd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
returned.wait()
return returned
def _executeGitCommandAssertSuccess(self, command, **kwargs):
returned = self._executeGitCommand(command, **kwargs)
assert returned.returncode is not None
if returned.returncode != 0:
raise GitCommandFailedException(kwargs.get('cwd', self._getWorkingDirectory()), command, returned)
return returned
def _getOutputAssertSuccess(self, command, **kwargs):
return self._executeGitCommandAssertSuccess(command, **kwargs).stdout.read()
def _getMergeBase(self, a, b):
raise NotImplementedError()
def getMergeBase(self, a, b):
repo = self
if isinstance(b, commit.Commit) and isinstance(b.repo, LocalRepository):
repo = b.repo
elif isinstance(a, commit.Commit) and isinstance(a.repo, LocalRepository):
repo = a.repo
return repo._getMergeBase(a, b)
############################## remote repositories #############################
class RemoteRepository(Repository):
def __init__(self, url, command = 'git'):
self.setCommand(command)
super(RemoteRepository, self).__init__()
self.url = url
def _getRefs(self, prefix = ''):
output = self._executeGitCommandAssertSuccess("ls-remote %s" % (self.url,))
for output_line in output.stdout:
commit, refname = output_line.split()
if refname.startswith(prefix):
yield refname[len(prefix):], commit.strip()
def _getRefsAsClass(self, prefix, cls):
return [cls(self, ref) for ref, _ in self._getRefs(prefix)]
def _getCommitByRefName(self, refname):
sha_by_ref = dict(self._getRefs())
for prefix in 'refs/tags/', 'refs/heads/':
sha = sha_by_ref.get(prefix + refname, None)
if sha is not None:
return commit.Commit(self, sha)
raise NonexistentRefException("Cannot find ref name %r in %s" % (refname, self))
def getBranches(self):
return self._getRefsAsClass('refs/heads/', branch.RemoteBranch)
def getTags(self):
return self._getRefsAsClass('refs/tags/', tag.RemoteTag)
############################## local repositories ##############################
class LocalRepository(Repository):
def __init__(self, path, command = 'git'):
self.setCommand(command)
super(LocalRepository, self).__init__()
self.path = path
self.config = config.GitConfiguration(self)
self._version = None
def __repr__(self):
return "<Git Repository at %s>" % (self.path,)
def _getWorkingDirectory(self):
return self.path
def _getCommitByHash(self, sha):
return commit.Commit(self, sha)
def _getCommitByRefName(self, name):
return commit.Commit(self, self._getOutputAssertSuccess("rev-parse %s" % name).strip())
def _getCommitByPartialHash(self, sha):
return self._getCommitByRefName(sha)
def getGitVersion(self):
if self._version is None:
version_output = self._getOutputAssertSuccess("version")
version_match = re.match(r"git\s+version\s+(\S+)[\s\(]?", version_output, re.I)
if version_match is None:
raise GitException("Cannot extract git version (unfamiliar output format %r?)" % version_output)
self._version = version_match.group(1)
return self._version
########################### Initializing a repository ##########################
def init(self, bare = False):
if not os.path.exists(self.path):
os.mkdir(self.path)
if not os.path.isdir(self.path):
raise GitException("Cannot create repository in %s - "
"not a directory" % self.path)
self._executeGitCommandAssertSuccess("init %s" % ("--bare" if bare else ""))
def _asURL(self, repo):
if isinstance(repo, LocalRepository):
repo = repo.path
elif isinstance(repo, RemoteRepository):
repo = repo.url
elif not isinstance(repo, basestring):
raise TypeError("Cannot clone from %r" % (repo,))
return repo
def clone(self, repo):
self._executeGitCommandAssertSuccess("clone %s %s" % (self._asURL(repo), self.path), cwd = ".")
########################### Querying repository refs ###########################
def getBranches(self):
returned = []
for git_branch_line in self._executeGitCommandAssertSuccess("branch").stdout:
if git_branch_line.startswith("*"):
git_branch_line = git_branch_line[1:]
git_branch_line = git_branch_line.strip()
if BRANCH_ALIAS_MARKER in git_branch_line:
alias_name, aliased = git_branch_line.split(BRANCH_ALIAS_MARKER)
returned.append(branch.LocalBranchAlias(self, alias_name, aliased))
else:
returned.append(branch.LocalBranch(self, git_branch_line))
return returned
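    # Illustrative `git branch` output handled above (hypothetical repo):
    #   * master
    #     feature
    #     alias -> master
    # yields LocalBranch('master'), LocalBranch('feature') and
    # LocalBranchAlias('alias', 'master').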
def getTags(self):
returned = []
for git_tag_line in self._executeGitCommandAssertSuccess("tag").stdout:
returned.append(tag.LocalTag(self, git_tag_line.strip()))
return returned
def _getCommits(self, specs, includeMerges):
command = "log --pretty=format:%%H %s" % specs
if not includeMerges:
command += " --no-merges"
for c in self._executeGitCommandAssertSuccess(command).stdout:
yield commit.Commit(self, c.strip())
def getCommits(self, start = None, end = "HEAD", includeMerges = True):
spec = self._normalizeRefName(start or "")
spec += ".."
spec += self._normalizeRefName(end)
return list(self._getCommits(spec, includeMerges = includeMerges))
def getCurrentBranch(self):
#todo: improve this method of obtaining current branch
for branch_name in self._executeGitCommandAssertSuccess("branch").stdout:
branch_name = branch_name.strip()
if not branch_name.startswith("*"):
continue
branch_name = branch_name[1:].strip()
if branch_name == '(no branch)':
return None
return self.getBranchByName(branch_name)
def getRemotes(self):
config_dict = self.config.getDict()
returned = []
for line in self._getOutputAssertSuccess("remote show -n").splitlines():
line = line.strip()
returned.append(remotes.Remote(self, line, config_dict.get('remote.%s.url' % line.strip())))
return returned
def getRemoteByName(self, name):
return self._getByName(self.getRemotes, name)
def _getMergeBase(self, a, b):
if isinstance(a, ref.Ref):
a = a.getHead()
if isinstance(b, ref.Ref):
b = b.getHead()
returned = self._executeGitCommand("merge-base %s %s" % (a, b))
if returned.returncode == 0:
return commit.Commit(self, returned.stdout.read().strip())
# make sure this is not a misc. error with git
unused = self.getHead()
return None
################################ Querying Status ###############################
def containsCommit(self, commit):
try:
self._executeGitCommandAssertSuccess("log -1 %s" % (commit,))
except GitException:
return False
return True
def getHead(self):
return self._getCommitByRefName("HEAD")
def _getFiles(self, *flags):
flags = ["--exclude-standard"] + list(flags)
return [f.strip()
for f in self._getOutputAssertSuccess("ls-files %s" % (" ".join(flags))).splitlines()]
def _getRawDiff(self, *flags, **options):
match_statuses = options.pop('fileStatuses', None)
if match_statuses is not None and not isinstance(match_statuses, Sequence):
raise ValueError("matchedStatuses must be a sequence")
if options:
raise TypeError("Unknown arguments specified: %s" % ", ".join(options))
flags = " ".join(str(f) for f in flags)
modified_files = []
for line in self._getOutputAssertSuccess("diff --raw %s" % flags).splitlines():
file_status = line.split()[-2]
file_name = line.split()[-1]
if match_statuses is None or file_status in match_statuses:
modified_files.append(ModifiedFile(file_name))
return modified_files
def getStagedFiles(self):
if self.isInitialized():
return self._getRawDiff('--cached')
return self._getFiles()
def getUnchangedFiles(self):
return self._getFiles()
def getChangedFiles(self):
return self._getRawDiff()
def getDeletedFiles(self):
return self._getRawDiff(fileStatuses = ['D'])
def getUntrackedFiles(self):
return self._getFiles("--others")
def isInitialized(self):
try:
self.getHead()
return True
except GitException:
return False
def isValid(self):
return os.path.isdir(os.path.join(self.path, ".git")) or \
(os.path.isfile(os.path.join(self.path, "HEAD")) and os.path.isdir(os.path.join(self.path, "objects")))
def isWorkingDirectoryClean(self):
return not (self.getUntrackedFiles() or self.getChangedFiles() or self.getStagedFiles())
def __contains__(self, thing):
if isinstance(thing, basestring) or isinstance(thing, commit.Commit):
return self.containsCommit(thing)
raise NotImplementedError()
################################ Staging content ###############################
def add(self, path):
self._executeGitCommandAssertSuccess("add %s" % quote_for_shell(path))
def delete(self, path, recursive = False, force = False):
flags = ""
if recursive:
flags += "-r "
if force:
flags += "-f "
self._executeGitCommandAssertSuccess("rm %s%s" % (flags, quote_for_shell(path)))
def addAll(self):
return self.add('.')
################################## Committing ##################################
def _normalizeRefName(self, thing):
if isinstance(thing, ref.Ref):
thing = thing.getNormalizedName()
return str(thing)
def _deduceNewCommitFromCommitOutput(self, output):
for pattern in [
# new-style commit pattern
r"^\[\S+\s+(?:\(root-commit\)\s+)?(\S+)\]",
]:
match = re.search(pattern, output)
if match:
return commit.Commit(self, match.group(1))
return None
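    # Illustrative commit output matched above (hypothetical): the line
    #   [master 1a2b3c4] fix typo
    # yields Commit(self, '1a2b3c4'); unrecognized output returns None.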
def commit(self, message, allowEmpty = False, commitAll = False):
args = ''
if commitAll:
args = args + '--all'
command = "commit %s -m %s" % (args, quote_for_shell(message))
if allowEmpty:
command += " --allow-empty"
output = self._getOutputAssertSuccess(command)
return self._deduceNewCommitFromCommitOutput(output)
################################ Changing state ################################
def _createBranchOrTag(self, objname, name, startingPoint, returned_class):
command = "%s %s " % (objname, name)
if startingPoint is not None:
command += self._normalizeRefName(startingPoint)
self._executeGitCommandAssertSuccess(command)
return returned_class(self, name)
def createBranch(self, name, startingPoint = None):
return self._createBranchOrTag('branch', name, startingPoint, branch.LocalBranch)
def createTag(self, name, startingPoint = None):
return self._createBranchOrTag('tag', name, startingPoint, tag.LocalTag)
def checkout(self, thing = None, targetBranch = None, files = ()):
if thing is None:
thing = ""
command = "checkout %s" % (self._normalizeRefName(thing),)
if targetBranch is not None:
command += " -b %s" % (targetBranch,)
if files:
command += " -- %s" % " ".join(files)
self._executeGitCommandAssertSuccess(command)
def mergeMultiple(self, srcs, allowFastForward = True, log = False, message = None):
try:
self._executeGitCommandAssertSuccess(CMD("merge",
" ".join(self._normalizeRefName(src) for src in srcs),
"--no-ff" if not allowFastForward else None,
"--log" if log else None,
("-m \"%s\"" % message) if message is not None else None))
except GitCommandFailedException, e:
# git-merge tends to ignore the stderr rule...
output = e.stdout + e.stderr
if 'conflict' in output.lower():
raise MergeConflict()
raise
def merge(self, src, *args, **kwargs):
return self.mergeMultiple([src], *args, **kwargs)
def _reset(self, flag, thing):
command = "reset %s %s" % (
flag,
self._normalizeRefName(thing))
self._executeGitCommandAssertSuccess(command)
def resetSoft(self, thing = "HEAD"):
return self._reset("--soft", thing)
def resetHard(self, thing = "HEAD"):
return self._reset("--hard", thing)
def resetMixed(self, thing = "HEAD"):
return self._reset("--mixed", thing)
def _clean(self, flags):
self._executeGitCommandAssertSuccess("clean -q " + flags)
def cleanIgnoredFiles(self):
"""Cleans files that match the patterns in .gitignore"""
return self._clean("-f -X")
def cleanUntrackedFiles(self):
return self._clean("-f -d")
################################# collaboration ################################
def addRemote(self, name, url):
self._executeGitCommandAssertSuccess("remote add %s %s" % (name, url))
return remotes.Remote(self, name, url)
def fetch(self, repo = None):
command = "fetch"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def pull(self, repo = None):
command = "pull"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def _getRefspec(self, fromBranch = None, toBranch = None, force = False):
returned = ""
if fromBranch is not None:
returned += self._normalizeRefName(fromBranch)
if returned or toBranch is not None:
returned += ":"
if toBranch is not None:
if isinstance(toBranch, branch.RegisteredRemoteBranch):
toBranch = toBranch.name
returned += self._normalizeRefName(toBranch)
if returned and force:
returned = "+%s" % returned
return returned
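    # Illustrative refspecs produced above (branch names are assumptions):
    #   fromBranch='master', toBranch='dev'   -> 'master:dev'
    #   fromBranch='master', toBranch=None    -> 'master:'
    #   fromBranch=None,     toBranch='dev'   -> ':dev'
    # and force=True prefixes any non-empty refspec with '+'.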
def push(self, remote = None, fromBranch = None, toBranch = None, force = False):
command = "push"
#build push arguments
refspec = self._getRefspec(toBranch = toBranch, fromBranch = fromBranch, force = force)
if refspec and not remote:
remote = "origin"
if isinstance(remote, remotes.Remote):
remote = remote.name
elif isinstance(remote, RemoteRepository):
remote = remote.url
elif isinstance(remote, LocalRepository):
remote = remote.path
if remote is not None and not isinstance(remote, basestring):
raise TypeError("Invalid type for 'remote' parameter: %s" % (type(remote),))
command = "push %s %s" % (remote if remote is not None else "", refspec)
self._executeGitCommandAssertSuccess(command)
def rebase(self, src):
self._executeGitCommandAssertSuccess("rebase %s" % self._normalizeRefName(src))
#################################### Stashes ###################################
def saveStash(self, name = None):
command = "stash save"
if name is not None:
command += " %s" % name
self._executeGitCommandAssertSuccess(command)
def popStash(self, arg = None):
command = "stash pop"
if arg is not None:
command += " %s" % arg
self._executeGitCommandAssertSuccess(command)
################################# Configuration ################################
################################### Shortcuts ##################################
def clone(source, location):
returned = LocalRepository(location)
returned.clone(source)
return returned
def find_repository():
orig_path = path = os.path.realpath('.')
drive, path = os.path.splitdrive(path)
while path:
current_path = os.path.join(drive, path)
current_repo = LocalRepository(current_path)
if current_repo.isValid():
return current_repo
path, path_tail = os.path.split(current_path)
if not path_tail:
raise CannotFindRepository("Cannot find repository for %s" % (orig_path,))
| gpl-3.0 |
tardyp/buildbot | master/buildbot/db/migrate/versions/058_add_build_data_length.py | 5 | 1187 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
from buildbot.util import sautils
def upgrade(migrate_engine):
# build_data without the 'length' column has never been released, so we don't care about
# correct values there.
metadata = sa.MetaData()
metadata.bind = migrate_engine
build_data_table = sautils.Table('build_data', metadata, autoload=True)
length_column = sa.Column('length', sa.Integer, nullable=False, server_default='0')
length_column.create(build_data_table)
| gpl-2.0 |
caisq/tensorflow | tensorflow/python/kernel_tests/lrn_op_test.py | 138 | 5675 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class LRNOpTest(test.TestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
def _RunAndVerify(self, dtype):
with self.test_session(use_gpu=True):
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
lrn_t = nn.local_response_normalization(
p,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p],
lrn_depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
err)
if dtype == dtypes.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
self.assertShapeEqual(expected, lrn_t)
def testCompute(self):
for _ in range(2):
self._RunAndVerify(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerify(dtypes.float16)
def testGradientsZeroInput(self):
with self.test_session(use_gpu=True):
shape = [4, 4, 4, 4]
p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
grad = gradients_impl.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
self.assertAllClose(r, expected)
self.assertShapeEqual(expected, grad)
def _RunAndVerifyGradients(self, dtype):
with self.test_session(use_gpu=True):
# random shape
shape = np.random.randint(1, 5, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
if dtype == dtypes.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
inp = constant_op.constant(
list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
lrn_op = nn.local_response_normalization(
inp,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
if dtype == dtypes.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
def testGradients(self):
for _ in range(2):
self._RunAndVerifyGradients(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerifyGradients(dtypes.float16)
if __name__ == "__main__":
test.main()
| apache-2.0 |
boyleworkflow/boyle | boyleworkflow/core.py | 1 | 2520 | from typing import Mapping, Union, Any, Iterable, NewType, Sequence, Tuple, List
import itertools
import attr
from boyleworkflow.util import id_property
from boyleworkflow.storage import Storage, Digest
from boyleworkflow.loc import Loc, check_valid_loc, normalize_loc
@attr.s(auto_attribs=True, frozen=True)
class Result:
loc: Loc = attr.ib()
digest: Digest
@loc.validator
def validate(instance, attribute, value):
check_valid_loc(value)
class Op:
definition: str
op_id: str
def run(
self,
inputs: Iterable[Result],
out_locs: Iterable[Loc],
storage: Storage,
) -> Iterable[Result]:
        raise NotImplementedError
def _make_tuple_sorted_by_loc(items) -> Tuple:
items = sorted(items, key=lambda x: x.loc)
return tuple(items)
def _validate_input_locs(items):
    for item in items:
        check_valid_loc(item.loc)
    seen_locs = set()
    for item in items:
        if item.loc in seen_locs:
            raise ValueError(f"multiple definitions of loc '{item.loc}'")
        seen_locs.add(item.loc)
@attr.s(auto_attribs=True, frozen=True)
class Calc:
op: Op
inputs: Tuple[Result, ...] = attr.ib(converter=_make_tuple_sorted_by_loc)
@inputs.validator
def validate(instance, attribute, value):
_validate_input_locs(value)
@id_property
def calc_id(self):
value = attr.asdict(self)
value["inputs"] = dict(value["inputs"])
return value
def validate_out_loc(instance, attribute, value):
check_valid_loc(value)
@attr.s(auto_attribs=True, frozen=True)
class Comp:
op: Op
parents: Tuple["Comp", ...] = attr.ib(converter=_make_tuple_sorted_by_loc)
loc: Loc = attr.ib(converter=normalize_loc)
    # Distinct method names: a second method literally named `validate`
    # would shadow the first in the class body, silently disabling the
    # parents validator.
    @parents.validator
    def _validate_parents(instance, attribute, value):
        _validate_input_locs(value)
    @loc.validator
    def _validate_loc(instance, attribute, value):
        check_valid_loc(value)
@id_property
def comp_id(self):
return {
"op_id": self.op.op_id,
"input_ids": [parent.comp_id for parent in self.parents],
"loc": self.loc,
}
def get_parents(comps: Iterable[Comp]) -> Iterable[Comp]:
return list(itertools.chain(*(comp.parents for comp in comps)))
def get_upstream_sorted(requested: Iterable[Comp]) -> Sequence[Comp]:
chunks: List[Iterable[Comp]] = []
new: Iterable[Comp] = list(requested)
while new:
chunks.insert(0, new)
new = get_parents(new)
return list(itertools.chain(*chunks))
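# Illustrative traversal (hypothetical comps): if c has parent b and b has
# parent a, get_upstream_sorted([c]) returns [a, b, c] -- every comp appears
# after all of its parents, although a comp reachable along several paths
# may occur more than once in the result.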
| lgpl-3.0 |
WSDC-NITWarangal/django | tests/aggregation_regress/models.py | 282 | 3288 | # -*- coding: utf-8 -*-
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
age = models.IntegerField()
friends = models.ManyToManyField('self', blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Publisher(models.Model):
name = models.CharField(max_length=255)
num_awards = models.IntegerField()
def __str__(self):
return self.name
class ItemTag(models.Model):
tag = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
@python_2_unicode_compatible
class Book(models.Model):
isbn = models.CharField(max_length=9)
name = models.CharField(max_length=255)
pages = models.IntegerField()
rating = models.FloatField()
price = models.DecimalField(decimal_places=2, max_digits=6)
authors = models.ManyToManyField(Author)
contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
publisher = models.ForeignKey(Publisher, models.CASCADE)
pubdate = models.DateField()
tags = GenericRelation(ItemTag)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Store(models.Model):
name = models.CharField(max_length=255)
books = models.ManyToManyField(Book)
original_opening = models.DateTimeField()
friday_night_closing = models.TimeField()
def __str__(self):
return self.name
class Entries(models.Model):
EntryID = models.AutoField(primary_key=True, db_column='Entry ID')
Entry = models.CharField(unique=True, max_length=50)
Exclude = models.BooleanField(default=False)
class Clues(models.Model):
ID = models.AutoField(primary_key=True)
EntryID = models.ForeignKey(Entries, models.CASCADE, verbose_name='Entry', db_column='Entry ID')
Clue = models.CharField(max_length=150)
class WithManualPK(models.Model):
# The generic relations regression test needs two different model
# classes with the same PK value, and there are some (external)
# DB backends that don't work nicely when assigning integer to AutoField
# column (MSSQL at least).
id = models.IntegerField(primary_key=True)
@python_2_unicode_compatible
class HardbackBook(Book):
weight = models.FloatField()
def __str__(self):
return "%s (hardback): %s" % (self.name, self.weight)
# Models for ticket #21150
class Alfa(models.Model):
name = models.CharField(max_length=10, null=True)
class Bravo(models.Model):
pass
class Charlie(models.Model):
alfa = models.ForeignKey(Alfa, models.SET_NULL, null=True)
bravo = models.ForeignKey(Bravo, models.SET_NULL, null=True)
class SelfRefFK(models.Model):
name = models.CharField(max_length=50)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
| bsd-3-clause |
clouddocx/boto | tests/unit/s3/test_lifecycle.py | 115 | 3946 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import AWSMockServiceTestCase
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from boto.s3.lifecycle import Rule, Lifecycle, Transition
class TestS3LifeCycle(AWSMockServiceTestCase):
connection_class = S3Connection
def default_body(self):
return """
<LifecycleConfiguration>
<Rule>
<ID>rule-1</ID>
<Prefix>prefix/foo</Prefix>
<Status>Enabled</Status>
<Transition>
<Days>30</Days>
<StorageClass>GLACIER</StorageClass>
</Transition>
<Expiration>
<Days>365</Days>
</Expiration>
</Rule>
<Rule>
<ID>rule-2</ID>
<Prefix>prefix/bar</Prefix>
<Status>Disabled</Status>
<Transition>
<Date>2012-12-31T00:00:000Z</Date>
<StorageClass>GLACIER</StorageClass>
</Transition>
</Rule>
</LifecycleConfiguration>
"""
def test_parse_lifecycle_response(self):
self.set_http_response(status_code=200)
bucket = Bucket(self.service_connection, 'mybucket')
response = bucket.get_lifecycle_config()
self.assertEqual(len(response), 2)
rule = response[0]
self.assertEqual(rule.id, 'rule-1')
self.assertEqual(rule.prefix, 'prefix/foo')
self.assertEqual(rule.status, 'Enabled')
self.assertEqual(rule.expiration.days, 365)
self.assertIsNone(rule.expiration.date)
transition = rule.transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(response[1].transition.date, '2012-12-31T00:00:000Z')
def test_expiration_with_no_transition(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', 'prefix', 'Enabled', 30)
xml = lifecycle.to_xml()
self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
def test_expiration_is_optional(self):
t = Transition(days=30, storage_class='GLACIER')
r = Rule('myid', 'prefix', 'Enabled', expiration=None,
transition=t)
xml = r.to_xml()
self.assertIn(
'<Transition><StorageClass>GLACIER</StorageClass><Days>30</Days>',
xml)
def test_expiration_with_expiration_and_transition(self):
t = Transition(date='2012-11-30T00:00:000Z', storage_class='GLACIER')
r = Rule('myid', 'prefix', 'Enabled', expiration=30, transition=t)
xml = r.to_xml()
self.assertIn(
'<Transition><StorageClass>GLACIER</StorageClass>'
'<Date>2012-11-30T00:00:000Z</Date>', xml)
self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
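    # A minimal end-to-end sketch (illustrative only; the rule id, prefix
    # and bucket are assumptions, and this is not an actual test case):
    #   lifecycle = Lifecycle()
    #   lifecycle.add_rule('archive-logs', 'logs/', 'Enabled', 365)
    #   bucket.configure_lifecycle(lifecycle)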
| mit |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/software-center/softwarecenter/backend/installbackend.py | 2 | 3088 | # Copyright (C) 2010 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from softwarecenter.utils import UnimplementedError
class InstallBackend(object):
def __init__(self):
self.pending_transactions = {}
self.pending_purchases = []
def upgrade(self, pkgname, appname, iconname, addons_install=[], addons_remove=[], metadata=None):
pass
def remove(self, pkgname, appname, iconname, addons_install=[], addons_remove=[], metadata=None):
pass
def remove_multiple(self, pkgnames, appnames, iconnames, addons_install=[], addons_remove=[], metadatas=None):
pass
def install(self, pkgname, appname, iconname, filename=None, addons_install=[], addons_remove=[], metadata=None):
pass
def install_multiple(self, pkgnames, appnames, iconnames, addons_install=[], addons_remove=[], metadatas=None):
pass
def apply_changes(self, pkgname, appname, iconname, addons_install=[], addons_remove=[], metadata=None):
pass
def reload(self, sources_list=None, metadata=None):
""" reload package list """
pass
class InstallBackendUI(object):
def ask_config_file_conflict(self, old, new):
""" show a conffile conflict and ask what to do
Return "keep" to keep the old one
"replace" to replace the old with the new one
"""
raise UnimplementedError("need custom ask_config_file_conflict method")
def ask_medium_required(self, medium, drive):
""" ask the user to provide a medium in drive
return True if medium is provided, False to cancel
"""
raise UnimplementedError("need custom ask_medium_required method")
def error(self, parent, primary, secondary, details=None, alternative_action=None):
""" show an error dialog """
raise UnimplementedError("need custom error method")
# singleton
install_backend = None
def get_install_backend():
global install_backend
if install_backend is None:
from softwarecenter.enums import USE_PACKAGEKIT_BACKEND
if not USE_PACKAGEKIT_BACKEND:
from softwarecenter.backend.installbackend_impl.aptd import AptdaemonBackend
install_backend = AptdaemonBackend()
else:
from softwarecenter.backend.installbackend_impl.packagekitd import PackagekitBackend
install_backend = PackagekitBackend()
return install_backend
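# A minimal usage sketch (illustrative package and app names):
#   backend = get_install_backend()
#   backend.install('gimp', 'GIMP Image Editor', 'gimp')
# The module-level singleton ensures all callers share one backend and
# therefore one view of pending_transactions.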
| gpl-3.0 |
daeseokyoun/youtube-dl | youtube_dl/extractor/addanime.py | 29 | 3281 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
qualities,
)
class AddAnimeIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
_TESTS = [{
'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
'md5': '72954ea10bc979ab5e2eb288b21425a0',
'info_dict': {
'id': '24MR3YO5SAS9',
'ext': 'mp4',
'description': 'One Piece 606',
'title': 'One Piece 606',
}
}, {
'url': 'http://add-anime.net/video/MDUGWYKNGBD8/One-Piece-687',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
try:
webpage = self._download_webpage(url, video_id)
except ExtractorError as ee:
if not isinstance(ee.cause, compat_HTTPError) or \
ee.cause.code != 503:
raise
redir_webpage = ee.cause.read().decode('utf-8')
action = self._search_regex(
r'<form id="challenge-form" action="([^"]+)"',
redir_webpage, 'Redirect form')
vc = self._search_regex(
r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
redir_webpage, 'redirect vc value')
av = re.search(
r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
redir_webpage)
if av is None:
raise ExtractorError('Cannot find redirect math task')
av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
parsed_url = compat_urllib_parse_urlparse(url)
av_val = av_res + len(parsed_url.netloc)
confirm_url = (
parsed_url.scheme + '://' + parsed_url.netloc +
action + '?' +
compat_urllib_parse_urlencode({
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
self._download_webpage(
confirm_url, video_id,
note='Confirming after redirect')
webpage = self._download_webpage(url, video_id)
FORMATS = ('normal', 'hq')
quality = qualities(FORMATS)
formats = []
for format_id in FORMATS:
rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
            video_url = self._search_regex(rex, webpage, 'video file URL',
fatal=False)
if not video_url:
continue
formats.append({
'format_id': format_id,
'url': video_url,
'quality': quality(format_id),
})
self._sort_formats(formats)
video_title = self._og_search_title(webpage)
video_description = self._og_search_description(webpage)
return {
'_type': 'video',
'id': video_id,
'formats': formats,
'title': video_title,
'description': video_description
}
| unlicense |
rvanlaar/easy-transifex | src/transifex/transifex/resources/tests/lib/mozilla_properties/__init__.py | 1 | 5569 | # -*- coding: utf-8 -*-
import os, chardet
import unittest
from transifex.txcommon.tests.base import BaseTestCase
from transifex.languages.models import Language
from transifex.resources.models import *
from transifex.resources.formats.mozillaproperties import MozillaPropertiesHandler
from transifex.addons.suggestions.models import Suggestion
class TestMozillaProperties(BaseTestCase):
"""Suite of tests for the propertiesfile lib."""
def setUp(self):
super(TestMozillaProperties, self).setUp()
self.resource.i18n_method = 'MOZILLA_PROPERTIES'
self.resource.save()
def test_escaped(self):
j = MozillaPropertiesHandler()
self.assertFalse(j._is_escaped(r"es blah", 2))
self.assertTrue(j._is_escaped(r"e\ blah", 2))
self.assertFalse(j._is_escaped(r"\\ blah", 2))
self.assertTrue(j._is_escaped(r"e\\\ blah", 4))
def test_accept(self):
parser = MozillaPropertiesHandler()
self.assertTrue(parser.accepts('MOZILLAPROPERTIES'))
def test_split(self):
j = MozillaPropertiesHandler()
res = j._split("asd sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split("asd=sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split("asd:sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split("asd\tsadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd\ =sadsf")
self.assertEqual(res[0], "asd\ ")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd = sadsf")
self.assertEqual(res[0], "asd")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd\\=sadsf")
self.assertEqual(res[0], r"asd\\")
self.assertEqual(res[1], "sadsf")
res = j._split(r"asd\\\=sadsf")
self.assertEqual(res[0], r"asd\\\=sadsf")
self.assertEqual(res[1], None)
res = j._split(r"asd\\\\=sadsf")
self.assertEqual(res[0], r"asd\\\\")
self.assertEqual(res[1], "sadsf")
res = j._split(r"Key21\:WithColon : Value21")
self.assertEqual(res[0], r"Key21\:WithColon")
self.assertEqual(res[1], "Value21")
def test_properties_parser(self):
"""PROPERTIES file tests."""
# Parsing PROPERTIES file
handler = MozillaPropertiesHandler(
os.path.join(os.path.dirname(__file__), 'complex.properties')
)
handler.set_language(self.resource.source_language)
handler.parse_file(is_source=True)
self.stringset = handler.stringset
entities = 0
translations = 0
for s in self.stringset.strings:
entities += 1
if s.translation.strip() != '':
translations += 1
        # Assert the number of entities - the PROPERTIES file has 25 entries;
        # keys without a value are ignored.
self.assertEqual(entities, 25)
self.assertEqual(translations, 25)
def test_properties_save2db(self, delete=True):
"""Test creating source strings from a PROPERTIES file works"""
handler = MozillaPropertiesHandler(
os.path.join(os.path.dirname(__file__), 'complex.properties')
)
handler.set_language(self.resource.source_language)
handler.parse_file(is_source=True)
r = self.resource
l = self.resource.source_language
handler.bind_resource(r)
handler.save2db(is_source=True)
# Check that all 25 entities are created in the db
self.assertEqual( SourceEntity.objects.filter(resource=r).count(), 25)
# Check that all source translations are there
self.assertEqual(
len(Translation.objects.filter(source_entity__resource=r, language=l)), 25
)
        # Import and save the Hindi (hi_IN) translation
handler.bind_file(os.path.join(os.path.dirname(__file__),'complex_hi_IN.properties'))
l = Language.objects.get(code='hi_IN')
handler.set_language(l)
handler.parse_file()
entities = 0
translations = 0
for s in handler.stringset.strings:
entities += 1
if s.translation.strip() != '':
translations += 1
self.assertEqual(entities, 23)
self.assertEqual(translations, 23)
handler.save2db()
# Check if all Source strings are untouched
self.assertEqual(SourceEntity.objects.filter(resource=r).count(), 25)
# Check that all translations are there
self.assertEqual(len(Translation.objects.filter(source_entity__resource=r,
language=l)), 23)
if delete:
r.delete()
else:
return r
def test_properties_compile(self):
"""Test compiling translations for PROPERTIES files"""
self.test_properties_save2db(delete=False)
handler = MozillaPropertiesHandler()
handler.bind_resource(self.resource)
handler.set_language(self.resource.source_language)
old_template = handler.compiled_template
handler.compile()
self.assertNotEqual(old_template, handler.compiled_template)
handler.set_language(Language.objects.get(code='hi_IN'))
old_template = handler.compiled_template
handler.compile()
self.assertNotEqual(old_template, handler.compiled_template)
#Cleanup
self.resource.delete()
| bsd-2-clause |
figment/falloutsnip | Vendor/IronPython/Lib/gzip.py | 78 | 18226 | """Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import io
import __builtin__
__all__ = ["GzipFile","open"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
def read32(input):
return struct.unpack("<I", input.read(4))[0]
def open(filename, mode="rb", compresslevel=9):
"""Shorthand for GzipFile(filename, mode, compresslevel).
The filename argument is required; mode defaults to 'rb'
and compresslevel defaults to 9.
"""
return GzipFile(filename, mode, compresslevel)
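# A minimal usage sketch of this module's open() (hypothetical file name):
#   f = open('example.gz', 'wb')
#   f.write('payload')
#   f.close()
#   data = open('example.gz', 'rb').read()   # -> 'payload'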
class GzipFile(io.BufferedIOBase):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the readinto() and truncate() methods.
"""
myfileobj = None
max_read_chunk = 10 * 1024 * 1024 # 10Mb
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, a StringIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
Be aware that only the 'rb', 'ab', and 'wb' values should be used
for cross-platform portability.
The compresslevel argument is an integer from 1 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the stream when compressing. All gzip compressed streams
are required to contain a timestamp. If omitted or None, the
current time is used. This module ignores the timestamp when
decompressing; however, some programs, such as gunzip, make use
of it. The format of the timestamp is the same as that of the
return value of time.time() and of the st_mtime member of the
object returned by os.stat().
"""
# guarantee the file is opened in binary mode on platforms
# that care about that sort of thing
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
if filename is None:
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'rb'
if mode[0:1] == 'r':
self.mode = READ
# Set flag indicating start of a new member
self._new_member = True
# Buffer data read from gzip file. extrastart is offset in
# stream where buffer starts. extrasize is number of
# bytes remaining in buffer from current stream position.
self.extrabuf = ""
self.extrasize = 0
self.extrastart = 0
self.name = filename
# Starts small, scales exponentially
self.min_readsize = 100
elif mode[0:1] == 'w' or mode[0:1] == 'a':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise IOError, "Mode " + mode + " not supported"
self.fileobj = fileobj
self.offset = 0
self.mtime = mtime
if self.mode == WRITE:
self._write_gzip_header()
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _check_closed(self):
"""Raises a ValueError if the underlying file object has been closed.
"""
if self.closed:
raise ValueError('I/O operation on closed file.')
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
self.writebuf = []
self.bufsize = 0
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = os.path.basename(self.name)
if fname.endswith(".gz"):
fname = fname[:-3]
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags))
mtime = self.mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, long(mtime))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def _init_read(self):
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise IOError, 'Not a gzipped file'
method = ord( self.fileobj.read(1) )
if method != 8:
raise IOError, 'Unknown compression method'
flag = ord( self.fileobj.read(1) )
self.mtime = read32(self.fileobj)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(2)
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen = ord(self.fileobj.read(1))
xlen = xlen + 256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = self.fileobj.read(1)
if not s or s=='\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = self.fileobj.read(1)
if not s or s=='\000':
break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
def write(self,data):
self._check_closed()
if self.mode != WRITE:
import errno
raise IOError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError, "write() on closed GzipFile object"
# Convert data type if called by io.BufferedWriter.
if isinstance(data, memoryview):
data = data.tobytes()
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
self.fileobj.write( self.compress.compress(data) )
self.offset += len(data)
return len(data)
def read(self, size=-1):
self._check_closed()
if self.mode != READ:
import errno
raise IOError(errno.EBADF, "read() on write-only GzipFile object")
if self.extrasize <= 0 and self.fileobj is None:
return ''
readsize = 1024
if size < 0: # get the whole thing
try:
while True:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
if size > self.extrasize:
size = self.extrasize
offset = self.offset - self.extrastart
chunk = self.extrabuf[offset: offset + size]
self.extrasize = self.extrasize - size
self.offset += size
return chunk
def _unread(self, buf):
self.extrasize = len(buf) + self.extrasize
self.offset -= len(buf)
def _read(self, size=1024):
if self.fileobj is None:
raise EOFError, "Reached EOF"
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
#
# First, check if we're at the end of the file;
# if so, it's time to stop; no more members to read.
pos = self.fileobj.tell() # Save current position
self.fileobj.seek(0, 2) # Seek to end of file
if pos == self.fileobj.tell():
raise EOFError, "Reached EOF"
else:
self.fileobj.seek( pos ) # Return to original position
self._init_read()
self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = False
# Read a chunk of data from the file
buf = self.fileobj.read(size)
# If the EOF has been reached, flush the decompression object
# and mark this object as finished.
if buf == "":
uncompress = self.decompress.flush()
self._read_eof()
self._add_read_data( uncompress )
raise EOFError, 'Reached EOF'
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
if self.decompress.unused_data != "":
# Ending case: we've come to the end of a member in the file,
# so seek back to the start of the unused data, finish up
# this member, and read a new gzip header.
# (The number of bytes to seek back is the length of the unused
# data, minus 8 because _read_eof() will rewind a further 8 bytes)
self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = True
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
offset = self.offset - self.extrastart
self.extrabuf = self.extrabuf[offset:] + data
self.extrasize = self.extrasize + len(data)
self.extrastart = self.offset
self.size = self.size + len(data)
def _read_eof(self):
# We've read to the end of the file, so we have to rewind in order
# to reread the 8 bytes containing the CRC and the file size.
        # We check that the computed CRC and size of the
        # uncompressed data match the stored values.  Note that the size
# stored is the true file size mod 2**32.
self.fileobj.seek(-8, 1)
crc32 = read32(self.fileobj)
isize = read32(self.fileobj) # may exceed 2GB
if crc32 != self.crc:
raise IOError("CRC check failed %s != %s" % (hex(crc32),
hex(self.crc)))
elif isize != (self.size & 0xffffffffL):
raise IOError, "Incorrect length of data produced"
# Gzip files can be padded with zeroes and still be valid archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = "\x00"
while c == "\x00":
c = self.fileobj.read(1)
if c:
self.fileobj.seek(-1, 1)
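# Illustrative sketch (not part of gzip.py): the 8-byte trailer that
# _read_eof() parses is two little-endian uint32s, CRC32 then ISIZE
# (uncompressed length mod 2**32) -- the same values write32u() emits.
import struct
import zlib

payload = b'hello'
trailer = struct.pack('<II', zlib.crc32(payload) & 0xffffffff,
                      len(payload) % 2 ** 32)
crc32, isize = struct.unpack('<II', trailer)
assert crc32 == zlib.crc32(payload) & 0xffffffff
assert isize == len(payload)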
@property
def closed(self):
return self.fileobj is None
def close(self):
if self.fileobj is None:
return
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def flush(self, zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise IOError("Can't rewind in write mode")
self.fileobj.seek(0)
self._new_member = True
self.extrabuf = ""
self.extrasize = 0
self.extrastart = 0
self.offset = 0
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence:
if whence == 1:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if self.mode == WRITE:
if offset < self.offset:
raise IOError('Negative seek in write mode')
count = offset - self.offset
for i in range(count // 1024):
self.write(1024 * '\0')
self.write((count % 1024) * '\0')
elif self.mode == READ:
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
return self.offset
def readline(self, size=-1):
if size < 0:
# Shortcut common case - newline found in buffer.
offset = self.offset - self.extrastart
i = self.extrabuf.find('\n', offset) + 1
if i > 0:
self.extrasize -= i - offset
self.offset += i - offset
return self.extrabuf[offset: i]
size = sys.maxint
readsize = self.min_readsize
else:
readsize = size
bufs = []
while size != 0:
c = self.read(readsize)
i = c.find('\n')
# We set i = size - 1 (so exactly size bytes are kept below) to
# break out of the loop under two conditions: 1) there's no
# newline, and the chunk is larger than size, or 2) there is a
# newline, but the resulting line would be longer than 'size'.
if (size <= i) or (i == -1 and len(c) > size):
i = size - 1
if i >= 0 or c == '':
bufs.append(c[:i + 1]) # Add portion of last chunk
self._unread(c[i + 1:]) # Push back rest of chunk
break
# Append chunk to list and decrease 'size'.
bufs.append(c)
size = size - len(c)
readsize = min(size, readsize * 2)
if readsize > self.min_readsize:
self.min_readsize = min(readsize, self.min_readsize * 2, 512)
return ''.join(bufs) # Return resulting line
def _test():
# Act like gzip; with -d, act like gunzip.
# Unlike gzip, the input file is not deleted, and no other gzip
# options or features are supported.
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
g = sys.stdout
else:
if arg[-3:] != ".gz":
print "filename doesn't end in .gz:", repr(arg)
continue
f = open(arg, "rb")
g = __builtin__.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
else:
f = __builtin__.open(arg, "rb")
g = open(arg + ".gz", "wb")
while True:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if __name__ == '__main__':
_test()
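# Example invocations of the self-test driver above (illustrative):
#   python gzip.py somefile          # compresses to somefile.gz
#   python gzip.py -d somefile.gz    # decompresses to somefile
#   python gzip.py - < in > out.gz   # filter mode via stdin/stdout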
| gpl-3.0 |
simonjbeaumont/sm | drivers/journaler.py | 12 | 8585 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# LVM-based journaling
import util
from srmetadata import open_file, close, get_min_blk_size_wrapper, \
file_read_wrapper, file_write_wrapper
LVM_MAX_NAME_LEN = 127
class JournalerException(util.SMException):
pass
class Journaler:
"""Simple journaler that uses LVM namespace for persistent "storage".
A journal is a id-value pair, and there can be only one journal for a
given id."""
LV_SIZE = 4 * 1024 * 1024 # minimum size
LV_TAG = "journaler"
SEPARATOR = "_"
JRN_CLONE = "clone"
JRN_LEAF = "leaf"
def __init__(self, lvmCache):
self.vgName = lvmCache.vgName
self.lvmCache = lvmCache
def create(self, type, id, val):
"""Create an entry of type "type" for "id" with the value "val".
Error if such an entry already exists."""
valExisting = self.get(type, id)
writeData = False
if valExisting:
raise JournalerException("Journal already exists for '%s:%s': %s" \
% (type, id, valExisting))
lvName = self._getNameLV(type, id, val)
mapperDevice = self._getLVMapperName(lvName)
if len(mapperDevice) > LVM_MAX_NAME_LEN:
lvName = self._getNameLV(type, id)
writeData = True
mapperDevice = self._getLVMapperName(lvName)
assert len(mapperDevice) <= LVM_MAX_NAME_LEN
self.lvmCache.create(lvName, self.LV_SIZE, self.LV_TAG)
if writeData:
fullPath = self.lvmCache._getPath(lvName)
fd = open_file(fullPath, True)
try:
e = None
try:
min_block_size = get_min_blk_size_wrapper(fd)
data = "%d %s" % (len(val), val)
file_write_wrapper(fd, 0, min_block_size, data, len(data))
except Exception as e:
raise
finally:
try:
close(fd)
self.lvmCache.deactivateNoRefcount(lvName)
except Exception as e2:
msg = 'failed to close/deactivate %s: %s' \
% (lvName, e2)
if not e:
util.SMlog(msg)
raise e2
else:
util.SMlog('WARNING: %s (error ignored)' % msg)
except:
util.logException("journaler.create")
try:
self.lvmCache.remove(lvName)
except Exception as e:
util.SMlog('WARNING: failed to clean up failed journal ' \
'creation: %s (error ignored)' % e)
raise JournalerException("Failed to write to journal %s" \
% lvName)
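# Illustrative sketch (not part of journaler.py): values too long for the
# LV name are framed inside the LV as "<length> <value>", which is the
# format the read path in _getAllEntries() expects.
val = 'some/long/value'
data = '%d %s' % (len(val), val)
length, rest = data.split(' ', 1)
assert rest[:int(length)] == val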
def remove(self, type, id):
"""Remove the entry of type "type" for "id". Error if the entry doesn't
exist."""
val = self.get(type, id)
if not val:
raise JournalerException("No journal for '%s:%s'" % (type, id))
lvName = self._getNameLV(type, id, val)
mapperDevice = self._getLVMapperName(lvName)
if len(mapperDevice) > LVM_MAX_NAME_LEN:
lvName = self._getNameLV(type, id)
self.lvmCache.remove(lvName)
def get(self, type, id):
"""Get the value for the journal entry of type "type" for "id".
Return None if no such entry exists"""
entries = self._getAllEntries()
if not entries.get(type):
return None
return entries[type].get(id)
def getAll(self, type):
"""Get a mapping id->value for all entries of type "type"."""
entries = self._getAllEntries()
if not entries.get(type):
return dict()
return entries[type]
def hasJournals(self, id):
"""Return True if there any journals for "id", False otherwise"""
# Pass False as an argument to skip opening journal files
entries = self._getAllEntries(False)
for type, ids in entries.iteritems():
if ids.get(id):
return True
return False
def _getNameLV(self, type, id, val=1):
return "%s%s%s%s%s" % (type, self.SEPARATOR, id, self.SEPARATOR, val)
def _getAllEntries(self, readFile=True):
lvList = self.lvmCache.getTagged(self.LV_TAG)
entries = dict()
for lvName in lvList:
parts = lvName.split(self.SEPARATOR, 2)
if len(parts) != 3:
raise JournalerException("Bad LV name: %s" % lvName)
type, id, val = parts
if readFile:
# For clone and leaf journals, additional
# data is written inside file
# TODO: Remove dependency on journal type
if type == self.JRN_CLONE or type == self.JRN_LEAF:
fullPath = self.lvmCache._getPath(lvName)
self.lvmCache.activateNoRefcount(lvName, False)
fd = open_file(fullPath)
try:
try:
min_block_size = get_min_blk_size_wrapper(fd)
data = file_read_wrapper(fd, 0, min_block_size, min_block_size)
length, val = data.split(" ", 1)
val = val[:int(length)]
except:
raise JournalerException("Failed to read from journal %s" \
% lvName)
finally:
close(fd)
self.lvmCache.deactivateNoRefcount(lvName)
if not entries.get(type):
entries[type] = dict()
entries[type][id] = val
return entries
def _getLVMapperName(self, lvName):
return '%s-%s' % (self.vgName.replace("-", "--"), lvName)
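# Illustrative sketch (not part of journaler.py): device-mapper escapes '-'
# in VG names by doubling it, which is what _getLVMapperName() reproduces
# before checking against the 127-character name limit. The VG name below
# is an invented example.
vg, lv = 'VG_XenStorage-0123', 'clone_1_a'
mapper = '%s-%s' % (vg.replace('-', '--'), lv)
assert mapper == 'VG_XenStorage--0123-clone_1_a'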
###########################################################################
#
# Unit tests
#
import lvutil
import lvmcache
def _runTests(vgName):
"""Unit testing"""
print "Running unit tests..."
if not vgName:
print "Error: missing VG name param"
return 1
if not lvutil._checkVG(vgName):
print "Error: VG %s not found" % vgName
return 1
j = Journaler(lvmcache.LVMCache(vgName))
if j.get("clone", "1"):
print "get non-existing failed"
return 1
j.create("clone", "1", "a")
val = j.get("clone", "1")
if val != "a":
print "create-get failed"
return 1
j.remove("clone", "1")
if j.get("clone", "1"):
print "remove failed"
return 1
j.create("modify", "X", "831_3")
j.create("modify", "Z", "831_4")
j.create("modify", "Y", "53_0")
val = j.get("modify", "X")
if val != "831_3":
print "create underscore_val failed"
return 1
val = j.get("modify", "Y")
if val != "53_0":
print "create multiple id's failed"
return 1
entries = j.getAll("modify")
if not entries.get("X") or not entries.get("Y") or \
entries["X"] != "831_3" or entries["Y"] != "53_0":
print "getAll failed: %s" % entries
return 1
j.remove("modify", "X")
val = j.getAll("modify")
if val.get("X") or not val.get("Y") or val["Y"] != "53_0":
print "remove(X) failed"
return 1
j.remove("modify", "Y")
j.remove("modify", "Z")
if j.get("modify", "Y"):
print "remove(Y) failed"
return 1
if j.get("modify", "Z"):
print "remove(Z) failed"
return 1
print "All tests passed"
return 0
if __name__ == '__main__':
import sys
vgName = None
if len(sys.argv) > 1:
vgName = sys.argv[1]
ret = _runTests(vgName)
sys.exit(ret)
| lgpl-2.1 |
ilya-klyuchnikov/buck | third-party/py/pex/pex/environment.py | 52 | 7749 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import itertools
import os
import site
import sys
import uuid
from pkg_resources import (
DistributionNotFound,
Environment,
Requirement,
WorkingSet,
find_distributions
)
from .common import die, open_zip, safe_mkdir, safe_rmtree
from .interpreter import PythonInterpreter
from .package import distribution_compatible
from .pex_builder import PEXBuilder
from .pex_info import PexInfo
from .tracer import TRACER
from .util import CacheHelper, DistributionHelper
class PEXEnvironment(Environment):
@classmethod
def force_local(cls, pex, pex_info):
if pex_info.code_hash is None:
# Do not support force_local if code_hash is not set. (It should always be set.)
return pex
explode_dir = os.path.join(pex_info.zip_unsafe_cache, pex_info.code_hash)
TRACER.log('PEX is not zip safe, exploding to %s' % explode_dir)
if not os.path.exists(explode_dir):
explode_tmp = explode_dir + '.' + uuid.uuid4().hex
with TRACER.timed('Unzipping %s' % pex):
try:
safe_mkdir(explode_tmp)
with open_zip(pex) as pex_zip:
pex_files = (x for x in pex_zip.namelist()
if not x.startswith(PEXBuilder.BOOTSTRAP_DIR) and
not x.startswith(PexInfo.INTERNAL_CACHE))
pex_zip.extractall(explode_tmp, pex_files)
except: # noqa: T803
safe_rmtree(explode_tmp)
raise
TRACER.log('Renaming %s to %s' % (explode_tmp, explode_dir))
os.rename(explode_tmp, explode_dir)
return explode_dir
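# Illustrative sketch (not part of environment.py): the explode-then-rename
# pattern used by force_local() -- unpack into a unique temp dir, then
# os.rename() into place so a half-written cache is never visible at the
# final path. Paths here are hypothetical.
import os
import tempfile
import uuid

final = os.path.join(tempfile.mkdtemp(), 'cache')
tmp = final + '.' + uuid.uuid4().hex
os.mkdir(tmp)                 # do all the slow work under tmp ...
if not os.path.exists(final):
    os.rename(tmp, final)     # ... then publish it atomically
else:
    os.rmdir(tmp)             # another process won the race
assert os.path.isdir(final)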
@classmethod
def update_module_paths(cls, new_code_path):
# Force subsequent imports to come from the .pex directory rather than the .pex file.
TRACER.log('Adding to the head of sys.path: %s' % new_code_path)
sys.path.insert(0, new_code_path)
for name, module in sys.modules.items():
if hasattr(module, "__path__"):
module_dir = os.path.join(new_code_path, *name.split("."))
TRACER.log('Adding to the head of %s.__path__: %s' % (module.__name__, module_dir))
module.__path__.insert(0, module_dir)
@classmethod
def write_zipped_internal_cache(cls, pex, pex_info):
prefix_length = len(pex_info.internal_cache) + 1
existing_cached_distributions = []
newly_cached_distributions = []
zip_safe_distributions = []
with open_zip(pex) as zf:
# Distribution names are the first element after ".deps/" and before the next "/"
distribution_names = set(filter(None, (filename[prefix_length:].split('/')[0]
for filename in zf.namelist() if filename.startswith(pex_info.internal_cache))))
# Create Distribution objects from these, and possibly write to disk if necessary.
for distribution_name in distribution_names:
internal_dist_path = '/'.join([pex_info.internal_cache, distribution_name])
# First check if this is already cached
dist_digest = pex_info.distributions.get(distribution_name) or CacheHelper.zip_hash(
zf, internal_dist_path)
cached_location = os.path.join(pex_info.install_cache, '%s.%s' % (
distribution_name, dist_digest))
if os.path.exists(cached_location):
dist = DistributionHelper.distribution_from_path(cached_location)
existing_cached_distributions.append(dist)
continue
else:
dist = DistributionHelper.distribution_from_path(os.path.join(pex, internal_dist_path))
if DistributionHelper.zipsafe(dist) and not pex_info.always_write_cache:
zip_safe_distributions.append(dist)
continue
with TRACER.timed('Caching %s' % dist):
newly_cached_distributions.append(
CacheHelper.cache_distribution(zf, internal_dist_path, cached_location))
return existing_cached_distributions, newly_cached_distributions, zip_safe_distributions
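# Illustrative sketch (not part of environment.py): distribution names are
# recovered from zip entry paths by stripping the internal-cache prefix and
# keeping the first path component, as in write_zipped_internal_cache().
# '.deps' is assumed here as the conventional internal cache directory.
internal_cache = '.deps'
prefix_length = len(internal_cache) + 1
names = ['.deps/six-1.10.0/six.py', '.deps/six-1.10.0/EGG-INFO/PKG-INFO']
dists = set(filter(None, (n[prefix_length:].split('/')[0] for n in names)))
assert dists == set(['six-1.10.0'])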
@classmethod
def load_internal_cache(cls, pex, pex_info):
"""Possibly cache out the internal cache."""
internal_cache = os.path.join(pex, pex_info.internal_cache)
with TRACER.timed('Searching dependency cache: %s' % internal_cache, V=2):
if os.path.isdir(pex):
for dist in find_distributions(internal_cache):
yield dist
else:
for dist in itertools.chain(*cls.write_zipped_internal_cache(pex, pex_info)):
yield dist
def __init__(self, pex, pex_info, interpreter=None, **kw):
self._internal_cache = os.path.join(pex, pex_info.internal_cache)
self._pex = pex
self._pex_info = pex_info
self._activated = False
self._working_set = None
self._interpreter = interpreter or PythonInterpreter.get()
super(PEXEnvironment, self).__init__(
search_path=sys.path if pex_info.inherit_path else [], **kw)
def update_candidate_distributions(self, distribution_iter):
for dist in distribution_iter:
if self.can_add(dist):
with TRACER.timed('Adding %s' % dist, V=2):
self.add(dist)
def can_add(self, dist):
return distribution_compatible(dist, self._interpreter, self.platform)
def activate(self):
if not self._activated:
with TRACER.timed('Activating PEX virtual environment from %s' % self._pex):
self._working_set = self._activate()
self._activated = True
return self._working_set
def _resolve(self, working_set, reqs):
reqs = reqs[:]
unresolved_reqs = set()
resolveds = set()
# Resolve them one at a time so that we can figure out which ones we need to elide should
# there be an interpreter incompatibility.
for req in reqs:
with TRACER.timed('Resolving %s' % req, V=2):
try:
resolveds.update(working_set.resolve([req], env=self))
except DistributionNotFound as e:
TRACER.log('Failed to resolve a requirement: %s' % e)
unresolved_reqs.add(e.args[0].project_name)
# Older versions of pkg_resources just call `DistributionNotFound(req)` instead of the
# modern `DistributionNotFound(req, requirers)` and so we may not have the 2nd requirers
# slot at all.
if len(e.args) >= 2 and e.args[1]:
unresolved_reqs.update(e.args[1])
unresolved_reqs = set([req.lower() for req in unresolved_reqs])
if unresolved_reqs:
TRACER.log('Unresolved requirements:')
for req in unresolved_reqs:
TRACER.log(' - %s' % req)
TRACER.log('Distributions contained within this pex:')
if not self._pex_info.distributions:
TRACER.log(' None')
else:
for dist in self._pex_info.distributions:
TRACER.log(' - %s' % dist)
if not self._pex_info.ignore_errors:
die('Failed to execute PEX file, missing compatible dependencies for:\n%s' % (
'\n'.join(map(str, unresolved_reqs))))
return resolveds
def _activate(self):
self.update_candidate_distributions(self.load_internal_cache(self._pex, self._pex_info))
if not self._pex_info.zip_safe and os.path.isfile(self._pex):
self.update_module_paths(self.force_local(self._pex, self._pex_info))
all_reqs = [Requirement.parse(req) for req in self._pex_info.requirements]
working_set = WorkingSet([])
resolved = self._resolve(working_set, all_reqs)
for dist in resolved:
with TRACER.timed('Activating %s' % dist, V=2):
working_set.add(dist)
if os.path.isdir(dist.location):
with TRACER.timed('Adding sitedir', V=2):
site.addsitedir(dist.location)
dist.activate()
return working_set
| apache-2.0 |
bluevoda/BloggyBlog | lib/python3.4/site-packages/django/contrib/postgres/fields/array.py | 59 | 9933 | import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.db.models.lookups import Exact, In
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
# For performance, only add a from_db_value() method if the base field
# implements it.
if hasattr(self.base_field, 'from_db_value'):
self.from_db_value = self._from_db_value
super(ArrayField, self).__init__(**kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.remote_field:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, (list, tuple)):
return [self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field,
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def _from_db_value(self, value, expression, connection, context):
if value is None:
return value
return [
self.base_field.from_db_value(item, expression, connection, context)
for item in value
]
def value_to_string(self, obj):
values = []
vals = self.value_from_object(obj)
base_field = self.base_field
for val in vals:
if val is None:
values.append(None)
else:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # no +1: the 1-based shift and postgres's inclusive upper bound cancel
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
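# Illustrative sketch (hypothetical app code, not part of this module): the
# index and slice transforms above power attribute-style lookups. 'Post'
# and 'tags' are invented names; running this needs a configured project.
#
#   class Post(models.Model):
#       tags = ArrayField(models.CharField(max_length=32))
#
#   Post.objects.filter(tags__0='django')      # SQL: "tags"[1] = 'django'
#   Post.objects.filter(tags__0_1=['django'])  # SQL: "tags"[1:1] slice
#   Post.objects.filter(tags__len=3)           # uses ArrayLenTransform below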
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for index, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def run_validators(self, value):
super(ArrayField, self).run_validators(value)
for index, part in enumerate(value):
try:
self.base_field.run_validators(part)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super(ArrayContains, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super(ArrayContainedBy, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayExact(Exact):
def as_sql(self, qn, connection):
sql, params = super(ArrayExact, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super(ArrayOverlap, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
# Distinguish NULL and empty arrays
return (
'CASE WHEN %(lhs)s IS NULL THEN NULL ELSE '
'coalesce(array_length(%(lhs)s, 1), 0) END'
) % {'lhs': lhs}, params
@ArrayField.register_lookup
class ArrayInLookup(In):
def get_prep_lookup(self):
values = super(ArrayInLookup, self).get_prep_lookup()
# In.process_rhs() expects values to be hashable, so convert lists
# to tuples.
return [tuple(value) for value in values]
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory(object):
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
| gpl-3.0 |
xen0l/ansible | lib/ansible/plugins/lookup/flattened.py | 101 | 2821 | # (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: flattened
author: Serge van Ginderachter <serge@vanginderachter.be>
version_added: "1.3"
short_description: return single list completely flattened
description:
- given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
options:
_terms:
description: lists to flatten
required: True
notes:
- unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore.
- aka highlander plugin, there can only be one (list).
"""
EXAMPLES = """
- name: "'unnest' all elements into single list"
debug: msg="all in one list {{lookup('flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
"""
RETURN = """
_raw:
description:
- flattened list
type: list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _check_list_of_one_list(self, term):
# make sure term is not a list of one (list of one..) item
# return the final non list item if so
if isinstance(term, list) and len(term) == 1:
term = term[0]
if isinstance(term, list):
term = self._check_list_of_one_list(term)
return term
def _do_flatten(self, terms, variables):
ret = []
for term in terms:
term = self._check_list_of_one_list(term)
if term == 'None' or term == 'null':
# an undefined item ends the flattened list
break
if isinstance(term, string_types):
# convert a variable to a list
term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
# but avoid converting a plain string to a list of one string
if term2 != [term]:
term = term2
if isinstance(term, list):
# if it's a list, check recursively for items that are a list
term = self._do_flatten(term, variables)
ret.extend(term)
else:
ret.append(term)
return ret
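# Standalone sketch (not part of this plugin) of the recursion in
# _do_flatten(), minus the Ansible templating and undefined-item handling:
def _flatten_demo(terms):
    ret = []
    for term in terms:
        if isinstance(term, list):
            ret.extend(_flatten_demo(term))  # descend into nested lists
        else:
            ret.append(term)
    return ret

assert _flatten_demo([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]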
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_flattened expects a list")
return self._do_flatten(terms, variables)
| gpl-3.0 |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
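# Illustrative sketch (not part of numpy): tri() builds its mask with an
# outer >= comparison between row indices and k-shifted column indices.
import numpy as np

N, M, k = 3, 4, 0
mask = np.greater_equal.outer(np.arange(N), np.arange(-k, M - k))
assert np.array_equal(mask.astype(float), np.tri(N, M, k))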
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for
Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
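# Illustrative sketch (not part of numpy): vander() seeds the x**0 column
# with ones, puts x in the next column, then forms the remaining powers
# with a running product (multiply.accumulate) along each row.
import numpy as np

x = np.array([1, 2, 3, 5])
v = np.vander(x, 3)  # decreasing powers by default
assert np.array_equal(v[:, -1], np.ones(4, dtype=v.dtype))  # x**0
assert np.array_equal(v[:, -2], x)                          # x**1
assert np.array_equal(v[:, -3], x ** 2)                     # x**2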
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
ms-iot/python | cpython/Lib/email/utils.py | 3 | 14263 | # Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Miscellaneous utilities."""
__all__ = [
'collapse_rfc2231_value',
'decode_params',
'decode_rfc2231',
'encode_rfc2231',
'formataddr',
'formatdate',
'format_datetime',
'getaddresses',
'make_msgid',
'mktime_tz',
'parseaddr',
'parsedate',
'parsedate_tz',
'parsedate_to_datetime',
'unquote',
]
import os
import re
import time
import random
import socket
import datetime
import urllib.parse
from email._parseaddr import quote
from email._parseaddr import AddressList as _AddressList
from email._parseaddr import mktime_tz
from email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
# Intrapackage imports
from email.charset import Charset
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = ''
CRLF = '\r\n'
TICK = "'"
specialsre = re.compile(r'[][\\()<>@,:;".]')
escapesre = re.compile(r'[\\"]')
def _has_surrogates(s):
"""Return True if s contains surrogate-escaped binary data."""
# This check is based on the fact that unless there are surrogates, utf8
# (Python's default encoding) can encode any string. This is the fastest
# way to check for surrogates, see issue 11454 for timings.
try:
s.encode()
return False
except UnicodeEncodeError:
return True
# How to deal with a string containing bytes before handing it to the
# application through the 'normal' interface.
def _sanitize(string):
# Turn any escaped bytes into unicode 'unknown' char. If the escaped
# bytes happen to be utf-8 they will instead get decoded, even if they
# were invalid in the charset the source was supposed to be in. This
# seems like it is not a bad thing; a defect was still registered.
original_bytes = string.encode('utf-8', 'surrogateescape')
return original_bytes.decode('utf-8', 'replace')
# Helpers
def formataddr(pair, charset='utf-8'):
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
Optional charset if given is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'.
"""
name, address = pair
# The address MUST (per RFC) be ascii, so raise a UnicodeError if it isn't.
address.encode('ascii')
if name:
try:
name.encode('ascii')
except UnicodeEncodeError:
if isinstance(charset, str):
charset = Charset(charset)
encoded_name = charset.header_encode(name)
return "%s <%s>" % (encoded_name, address)
else:
quotes = ''
if specialsre.search(name):
quotes = '"'
name = escapesre.sub(r'\\\g<0>', name)
return '%s%s%s <%s>' % (quotes, name, quotes, address)
return address
def getaddresses(fieldvalues):
"""Return a list of (REALNAME, EMAIL) for each fieldvalue."""
all = COMMASPACE.join(fieldvalues)
a = _AddressList(all)
return a.addresslist
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<atom>.*?) # non-greedy up to the next ?= is the atom
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE)
def _format_timetuple_and_zone(timetuple, zone):
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
timetuple[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
timetuple[0], timetuple[3], timetuple[4], timetuple[5],
zone)
def formatdate(timeval=None, localtime=False, usegmt=False):
"""Returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Optional timeval if given is a floating point time value as accepted by
gmtime() and localtime(), otherwise the current time is used.
Optional localtime is a flag that when True, interprets timeval, and
returns a date relative to the local timezone instead of UTC, properly
taking daylight savings time into account.
Optional argument usegmt means that the timezone is written out as
an ascii string, not numeric one (so "GMT" instead of "+0000"). This
is needed for HTTP, and is only used when localtime==False.
"""
# Note: we cannot use strftime() because that honors the locale and RFC
# 2822 requires that day and month names be the English abbreviations.
if timeval is None:
timeval = time.time()
if localtime:
now = time.localtime(timeval)
# Calculate timezone offset, based on whether the local zone has
# daylight savings time, and whether DST is in effect.
if time.daylight and now[-1]:
offset = time.altzone
else:
offset = time.timezone
hours, seconds = divmod(abs(offset), 3600) # remainder is in seconds
# Remember offset is in seconds west of UTC, but the timezone is in
# minutes east of UTC, so the signs differ.
if offset > 0:
sign = '-'
else:
sign = '+'
zone = '%s%02d%02d' % (sign, hours, seconds // 60)
else:
now = time.gmtime(timeval)
# Timezone offset is always -0000
if usegmt:
zone = 'GMT'
else:
zone = '-0000'
return _format_timetuple_and_zone(now, zone)
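# Illustrative sketch (not part of email.utils): a fixed timestamp rendered
# in the RFC 2822 and HTTP (usegmt) forms by the function above.
from email.utils import formatdate

assert formatdate(0, usegmt=True) == 'Thu, 01 Jan 1970 00:00:00 GMT'
assert formatdate(0) == 'Thu, 01 Jan 1970 00:00:00 -0000'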
def format_datetime(dt, usegmt=False):
"""Turn a datetime into a date string as specified in RFC 2822.
If usegmt is True, dt must be an aware datetime with an offset of zero. In
this case 'GMT' will be rendered instead of the normal +0000 required by
RFC2822. This is to support HTTP headers involving date stamps.
"""
now = dt.timetuple()
if usegmt:
if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
raise ValueError("usegmt option requires a UTC datetime")
zone = 'GMT'
elif dt.tzinfo is None:
zone = '-0000'
else:
zone = dt.strftime("%z")
return _format_timetuple_and_zone(now, zone)
def make_msgid(idstring=None, domain=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<142480216486.20800.16526388040877946887@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id. Optional domain if given provides the
portion of the message id after the '@'. It defaults to the locally
defined hostname.
"""
timeval = int(time.time()*100)
pid = os.getpid()
randint = random.getrandbits(64)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
if domain is None:
domain = socket.getfqdn()
msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, domain)
return msgid
def parsedate_to_datetime(data):
*dtuple, tz = _parsedate_tz(data)
if tz is None:
return datetime.datetime(*dtuple[:6])
return datetime.datetime(*dtuple[:6],
tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
def parseaddr(addr):
addrs = _AddressList(addr).addresslist
if not addrs:
return '', ''
return addrs[0]
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str
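# Illustrative examples (not part of the original module):
#     unquote('"a \\"quoted\\" string"')  ->  'a "quoted" string'
#     unquote('<user@example.com>')       ->  'user@example.com'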
# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
"""Decode string according to RFC 2231"""
parts = s.split(TICK, 2)
if len(parts) <= 2:
return None, None, s
return parts
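# Illustrative example (not part of the original module): the charset and an
# (empty) language are split off the percent-encoded payload:
#     decode_rfc2231("utf-8''%C2%A3%20rates")
#     ->  ['utf-8', '', '%C2%A3%20rates']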
def encode_rfc2231(s, charset=None, language=None):
"""Encode string according to RFC 2231.
If neither charset nor language is given, then s is returned as-is. If
charset is given but not language, the string is encoded using the empty
string for language.
"""
s = urllib.parse.quote(s, safe='', encoding=charset or 'ascii')
if charset is None and language is None:
return s
if language is None:
language = ''
return "%s'%s'%s" % (charset, language, s)
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
re.ASCII)
def decode_params(params):
"""Decode parameters list according to RFC 2231.
params is a sequence of 2-tuples containing (param name, string value).
"""
# Copy params so we don't mess with the original
params = params[:]
new_params = []
# Map parameter's name to a list of continuations. The values are a
# 3-tuple of the continuation number, the string value, and a flag
# specifying whether a particular segment is %-encoded.
rfc2231_params = {}
name, value = params.pop(0)
new_params.append((name, value))
while params:
name, value = params.pop(0)
if name.endswith('*'):
encoded = True
else:
encoded = False
value = unquote(value)
mo = rfc2231_continuation.match(name)
if mo:
name, num = mo.group('name', 'num')
if num is not None:
num = int(num)
rfc2231_params.setdefault(name, []).append((num, value, encoded))
else:
new_params.append((name, '"%s"' % quote(value)))
if rfc2231_params:
for name, continuations in rfc2231_params.items():
value = []
extended = False
# Sort by number
continuations.sort()
# And now append all values in numerical order, converting
# %-encodings for the encoded segments. If any of the
# continuation names ends in a *, then the entire string, after
# decoding segments and concatenating, must have the charset and
# language specifiers at the beginning of the string.
for num, s, encoded in continuations:
if encoded:
# Decode as "latin-1", so the characters in s directly
# represent the percent-encoded octet values.
# collapse_rfc2231_value treats this as an octet sequence.
s = urllib.parse.unquote(s, encoding="latin-1")
extended = True
value.append(s)
value = quote(EMPTYSTRING.join(value))
if extended:
charset, language, value = decode_rfc2231(value)
new_params.append((name, (charset, language, '"%s"' % value)))
else:
new_params.append((name, '"%s"' % value))
return new_params
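# Illustrative sketch (not part of the original module): a parameter split
# across RFC 2231 continuations comes back as one entry. With hypothetical
# input
#     decode_params([('content-disposition', 'attachment'),
#                    ('filename*0*', "utf-8''foo%20"),
#                    ('filename*1', 'bar.txt')])
# the first tuple is passed through unchanged and the continuations collapse
# into a single 'filename' entry whose value is the
# (charset, language, quoted-string) triple.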
def collapse_rfc2231_value(value, errors='replace',
fallback_charset='us-ascii'):
if not isinstance(value, tuple) or len(value) != 3:
return unquote(value)
    # While value comes to us as a unicode string, we need it to be a bytes
    # object.  We do not want the normal utf-8 encoding of bytes(); we want a
    # straight interpretation of the string as character bytes.
charset, language, text = value
if charset is None:
# Issue 17369: if charset/lang is None, decode_rfc2231 couldn't parse
# the value, so use the fallback_charset.
charset = fallback_charset
rawbytes = bytes(text, 'raw-unicode-escape')
try:
return str(rawbytes, charset, errors)
except LookupError:
# charset is not a known codec.
return unquote(text)
#
# datetime doesn't provide a localtime function yet, so provide one. Code
# adapted from the patch in issue 9527. This may not be perfect, but it is
# better than not having it.
#
def localtime(dt=None, isdst=-1):
"""Return local time as an aware datetime object.
If called without arguments, return current time. Otherwise *dt*
argument should be a datetime instance, and it is converted to the
local time zone according to the system time zone database. If *dt* is
naive (that is, dt.tzinfo is None), it is assumed to be in local time.
In this case, a positive or zero value for *isdst* causes localtime to
presume initially that summer time (for example, Daylight Saving Time)
is or is not (respectively) in effect for the specified time. A
negative value for *isdst* causes the localtime() function to attempt
to divine whether summer time is in effect for the specified time.
"""
if dt is None:
return datetime.datetime.now(datetime.timezone.utc).astimezone()
if dt.tzinfo is not None:
return dt.astimezone()
# We have a naive datetime. Convert to a (localtime) timetuple and pass to
# system mktime together with the isdst hint. System mktime will return
# seconds since epoch.
tm = dt.timetuple()[:-1] + (isdst,)
seconds = time.mktime(tm)
localtm = time.localtime(seconds)
try:
delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
tz = datetime.timezone(delta, localtm.tm_zone)
except AttributeError:
# Compute UTC offset and compare with the value implied by tm_isdst.
# If the values match, use the zone name implied by tm_isdst.
delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
dst = time.daylight and localtm.tm_isdst > 0
gmtoff = -(time.altzone if dst else time.timezone)
if delta == datetime.timedelta(seconds=gmtoff):
tz = datetime.timezone(delta, time.tzname[dst])
else:
tz = datetime.timezone(delta)
return dt.replace(tzinfo=tz)
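# Illustrative examples (not part of the original module):
#     localtime()                                # aware "now" in local time
#     localtime(datetime.datetime(2015, 6, 1))   # naive input, assumed local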
| bsd-3-clause |
ComNets-Bremen/WUSN | wusnExampleServer/SimpleLogger/management/commands/SerialListener.py | 1 | 3139 | from django.core.management.base import BaseCommand, CommandError
from SimpleLogger.models import SimpleWusnData
import serial, traceback, re, datetime, pytz
RE_STRING = r"^#\[(?P<p_number>\d+)\]\[(?P<node_id>\d+)\]\s+D(?P<y>\d+)\/(?P<m>\d+)\/(?P<d>\d+)\s+t(?P<hour>\d+)\:(?P<minute>\d+)\s+W(?P<water>\d+)\s+T(?P<temp>\d+)\s+(?P<chksum>\d+)\s+(?P<success_ind>\d+)\s+\[RX_RSSI:(?P<rssi>[\d+-]+)\]"
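# Illustrative sample line matching RE_STRING (hypothetical sensor output):
#   #[12][3] D17/06/05 t12:34 W123 T456 78 1 [RX_RSSI:-60]
# i.e. packet 12 from node 3, dated 2017-06-05 12:34, raw water reading 123,
# raw temperature 456, checksum 78, success indicator 1, RSSI -60.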
class Command(BaseCommand):
    help = 'Listen on a given USB port for incoming data and insert it into the model'
def add_arguments(self, parser):
parser.add_argument('port', type=str, default="/dev/ttyUSB0")
parser.add_argument('baudrate', type=int, default=115200)
def handle(self, *args, **options):
self.stdout.write(self.style.SUCCESS('Starting listener on Port %s...' %
options['port']))
self.stdout.write(self.style.SUCCESS('Baudrate: %s...' %
options['baudrate']))
re_c = re.compile(RE_STRING)
ser = serial.Serial(options['port'], options['baudrate'], timeout=10)
while True:
data = ser.readline()[:-1]
if len(data) == 0:
continue
print "Received:", data
values = re_c.search(data)
            if values is not None:
dt = datetime.datetime(
int(values.group('y'))+2000,
int(values.group('m')),
int(values.group('d')),
int(values.group('hour')),
int(values.group('minute')),
tzinfo=pytz.utc
)
water_raw = int(values.group('water'))
# The following value is used for the dielectric value epsilon
# Adapt it according to the soil
water = float(water_raw)/50.0
temperature_raw = int(values.group('temp'))
temperature_decompressed = temperature_raw
if temperature_raw > 900:
temperature_decompressed = \
5.0*(float(temperature_raw)-900.0)+900.0
temperature = (float(temperature_decompressed) - 400.0)/10.0
try:
dataset = SimpleWusnData()
dataset.date_time_sensor = dt
dataset.packet_number = values.group('p_number')
dataset.node_id = values.group('node_id')
dataset.water_raw = water_raw
dataset.water = water
dataset.temperature_raw = temperature_raw
dataset.temperature = temperature
dataset.rssi = int(values.group('rssi'))
dataset.success_ind = int(values.group('success_ind'))
dataset.checksum = int(values.group('chksum'))
dataset.save()
                except Exception:
self.stdout.write(self.style.ERROR('Cannot insert data'))
print traceback.format_exc()
else:
self.stdout.write(self.style.SUCCESS('Data inserted'))
| gpl-3.0 |
Avinash-Raj/appengine-django-skeleton | lib/_mysql_exceptions.py | 99 | 2352 | """_mysql_exceptions: Exception classes for _mysql and MySQLdb.
These classes are dictated by the DB API v2.0:
http://www.python.org/topics/database/DatabaseAPI-2.0.html
"""
try:
from exceptions import Exception, StandardError, Warning
except ImportError:
# Python 3
StandardError = Exception
class MySQLError(StandardError):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
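# Illustrative sketch (not part of the original module): DB-API callers are
# expected to catch these by hierarchy, narrowest class first. The `cursor`
# object and `reconnect()` helper below are hypothetical:
#
#     try:
#         cursor.execute(query)
#     except OperationalError:
#         reconnect()
#     except DatabaseError as e:
#         print("query failed: %s" % e)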
| bsd-3-clause |
StellarCN/py-stellar-base | stellar_sdk/xdr/bucket_metadata.py | 1 | 2252 | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .bucket_metadata_ext import BucketMetadataExt
from .uint32 import Uint32
__all__ = ["BucketMetadata"]
class BucketMetadata:
"""
XDR Source Code
----------------------------------------------------------------
struct BucketMetadata
{
// Indicates the protocol version used to create / merge this bucket.
uint32 ledgerVersion;
// reserved for future use
union switch (int v)
{
case 0:
void;
}
ext;
};
----------------------------------------------------------------
"""
def __init__(
self,
ledger_version: Uint32,
ext: BucketMetadataExt,
) -> None:
self.ledger_version = ledger_version
self.ext = ext
def pack(self, packer: Packer) -> None:
self.ledger_version.pack(packer)
self.ext.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "BucketMetadata":
ledger_version = Uint32.unpack(unpacker)
ext = BucketMetadataExt.unpack(unpacker)
return cls(
ledger_version=ledger_version,
ext=ext,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "BucketMetadata":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "BucketMetadata":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.ledger_version == other.ledger_version and self.ext == other.ext
def __str__(self):
out = [
f"ledger_version={self.ledger_version}",
f"ext={self.ext}",
]
return f"<BucketMetadata {[', '.join(out)]}>"
| apache-2.0 |
hryamzik/ansible | lib/ansible/modules/remote_management/foreman/katello.py | 7 | 20467 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
description:
- Allows the management of Katello resources inside your Foreman server.
version_added: "2.3"
author:
- Eric D Helms (@ehelms)
requirements:
- nailgun >= 0.28.0
- python >= 2.6
- datetime
options:
server_url:
description:
- URL of Foreman server.
required: true
username:
description:
- Username on Foreman server.
required: true
password:
description:
- Password for user accessing Foreman server.
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host).
choices:
- repository
- manifest
- repository_set
- sync_plan
- content_view
- lifecycle_environment
- activation_key
required: true
action:
description:
- action associated to the entity resource to set or edit in dictionary format.
      - Possible actions in relation to entities.
- "sync (available when entity=product or entity=repository)"
- "publish (available when entity=content_view)"
- "promote (available when entity=content_view)"
choices:
- sync
- publish
- promote
required: false
params:
description:
- Parameters associated to the entity resource and action, to set or edit in dictionary format.
      - Each choice may only be available with specific entities and actions.
      - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,...],...)."
- The action "None" means no action specified.
- Possible Params in relation to entity and action.
- "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
- "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
- "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
- "content ([manifest,None])"
- "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
- "basearch ([repository_set,None])"
- "releaserver ([repository_set,None])"
- "sync_date ([sync_plan,None])"
- "interval ([sync_plan,None])"
- "repositories ([content_view,None])"
- "from_environment ([content_view,promote])"
- "to_environment([content_view,promote])"
- "prior ([lifecycle_environment,None])"
- "content_view ([activation_key,None])"
- "lifecycle_environment ([activation_key,None])"
required: true
task_timeout:
description:
- The timeout in seconds to wait for the started Foreman action to finish.
      - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
default: 1000
version_added: "2.7"
required: false
verify_ssl:
description:
      - Verify the SSL/HTTPS connection (e.g. for a valid certificate).
default: false
type: bool
required: false
'''
EXAMPLES = '''
---
# Simple Example:
- name: Create Product
katello:
username: admin
password: admin
server_url: https://fakeserver.com
entity: product
params:
name: Centos 7
delegate_to: localhost
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
katello:
username: admin
password: admin
server_url: https://fakeserver.com
entity: "{{ entity }}"
params: "{{ params }}"
delegate_to: localhost
# tasks.yml
---
- include: katello.yml
vars:
name: Create Dev Environment
entity: lifecycle_environment
params:
name: Dev
prior: Library
organization: Default Organization
- include: katello.yml
vars:
name: Create Centos Product
entity: product
params:
name: Centos 7
organization: Default Organization
- include: katello.yml
vars:
name: Create 7.2 Repository
entity: repository
params:
name: Centos 7.2
product: Centos 7
organization: Default Organization
content_type: yum
url: http://mirror.centos.org/centos/7/os/x86_64/
- include: katello.yml
vars:
name: Create Centos 7 View
entity: content_view
params:
name: Centos 7 View
organization: Default Organization
repositories:
- name: Centos 7.2
product: Centos 7
- include: katello.yml
vars:
name: Enable RHEL Product
entity: repository_set
params:
name: Red Hat Enterprise Linux 7 Server (RPMs)
product: Red Hat Enterprise Linux Server
organization: Default Organization
basearch: x86_64
releasever: 7
- include: katello.yml
vars:
    name: Promote Contentview Environment with longer timeout
task_timeout: 10800
entity: content_view
action: promote
params:
name: MyContentView
organization: MyOrganisation
from_environment: Testing
to_environment: Production
# Best Practices
# In Foreman, things can be done in parallel.
# When a conflicting action is already running,
# the task will fail instantly instead of waiting for the already running action to complete.
# So you should use an "until success" loop to catch this.
- name: Promote Contentview Environment with increased Timeout
katello:
username: ansibleuser
password: supersecret
task_timeout: 10800
entity: content_view
action: promote
params:
name: MyContentView
organization: MyOrganisation
from_environment: Testing
to_environment: Production
register: task_result
until: task_result is success
retries: 9
delay: 120
'''
RETURN = '''# '''
import datetime
import os
import traceback
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except ImportError:
HAS_NAILGUN_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NailGun(object):
def __init__(self, server, entities, module, task_timeout):
self._server = server
self._entities = entities
self._module = module
entity_mixins.TASK_TIMEOUT = task_timeout
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, organization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
def find_repository(self, name, product, organization):
product = self.find_product(product, organization)
repository = self._entities.Repository(self._server, name=name, product=product)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
def organization(self, params):
name = params['name']
del params['name']
org = self.find_organization(name, **params)
if org:
org = self._entities.Organization(self._server, name=name, id=org.id, **params)
org.update()
else:
org = self._entities.Organization(self._server, name=name, **params)
org.create()
return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
try:
file = open(os.getcwd() + params['content'], 'r')
content = file.read()
finally:
file.close()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception as e:
if "Import is the same as existing data" in e.message:
return False
else:
self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
exception=traceback.format_exc())
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
def repository(self, params):
product = self.find_product(params['product'], params['organization'])
params['product'] = product.id
del params['organization']
repository = self._entities.Repository(self._server, **params)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
repository.id = response[0].id
repository.update()
else:
repository.create()
return True
def sync_repository(self, params):
org = self.find_organization(params['organization'])
repository = self.find_repository(params['name'], params['product'], org.name)
return repository.sync()
def repository_set(self, params):
product = self.find_product(params['product'], params['organization'])
del params['product']
del params['organization']
if not product:
return False
else:
reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
reposet = reposet.search()[0]
formatted_name = [params['name'].replace('(', '').replace(')', '')]
formatted_name.append(params['basearch'])
if 'releasever' in params:
formatted_name.append(params['releasever'])
formatted_name = ' '.join(formatted_name)
repository = self._entities.Repository(self._server, product=product, name=formatted_name)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
repository = repository.search()
if len(repository) == 0:
if 'releasever' in params:
reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
else:
reposet.enable(data={'basearch': params['basearch']})
return True
def sync_plan(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
products = params['products']
del params['products']
sync_plan = self._entities.SyncPlan(
self._server,
name=params['name'],
organization=org
)
response = sync_plan.search()
sync_plan.sync_date = params['sync_date']
sync_plan.interval = params['interval']
if len(response) == 1:
sync_plan.id = response[0].id
sync_plan.update()
else:
response = sync_plan.create()
sync_plan.id = response[0].id
if products:
ids = []
for name in products:
product = self.find_product(name, org.name)
ids.append(product.id)
sync_plan.add_products(data={'product_ids': ids})
return True
def content_view(self, params):
org = self.find_organization(params['organization'])
content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
response = content_view.search()
if len(response) == 1:
content_view.id = response[0].id
content_view.update()
else:
content_view = content_view.create()
if params['repositories']:
repos = []
for repository in params['repositories']:
repository = self.find_repository(repository['name'], repository['product'], org.name)
repos.append(repository)
content_view.repository = repos
content_view.update(['repository'])
def find_content_view_version(self, name, organization, environment):
env = self.find_lifecycle_environment(environment, organization)
content_view = self.find_content_view(name, organization)
content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
response = content_view_version.search(['content_view'], {'environment_id': env.id})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View version found for %s" % response)
def publish(self, params):
content_view = self.find_content_view(params['name'], params['organization'])
return content_view.publish()
def promote(self, params):
to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
data = {'environment_id': to_environment.id}
return version.promote(data=data)
def lifecycle_environment(self, params):
org = self.find_organization(params['organization'])
prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
response = lifecycle_env.search()
if len(response) == 1:
lifecycle_env.id = response[0].id
lifecycle_env.update()
else:
lifecycle_env.create()
return True
def activation_key(self, params):
org = self.find_organization(params['organization'])
activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
response = activation_key.search()
if len(response) == 1:
activation_key.id = response[0].id
activation_key.update()
else:
activation_key.create()
if params['content_view']:
content_view = self.find_content_view(params['content_view'], params['organization'])
lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
activation_key.content_view = content_view
activation_key.environment = lifecycle_environment
activation_key.update()
return True
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True),
username=dict(type='str', required=True, no_log=True),
password=dict(type='str', required=True, no_log=True),
entity=dict(type='str', required=True,
choices=['repository', 'manifest', 'repository_set', 'sync_plan', 'content_view', 'lifecycle_environment', 'activation_key']),
action=dict(type='str', choices=['sync', 'publish', 'promote']),
verify_ssl=dict(type='bool', default=False),
task_timeout=dict(type='int', default=1000),
params=dict(type='dict', required=True, no_log=True),
),
supports_check_mode=True,
)
if not HAS_NAILGUN_PACKAGE:
module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
entity = module.params['entity']
action = module.params['action']
params = module.params['params']
verify_ssl = module.params['verify_ssl']
task_timeout = module.params['task_timeout']
server = ServerConfig(
url=server_url,
auth=(username, password),
verify=verify_ssl
)
ng = NailGun(server, entities, module, task_timeout)
    # Let's make a connection to the server with username and password
try:
org = entities.Organization(server)
org.search()
except Exception as e:
module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
result = False
if entity == 'product':
if action == 'sync':
result = ng.sync_product(params)
else:
result = ng.product(params)
elif entity == 'repository':
if action == 'sync':
result = ng.sync_repository(params)
else:
result = ng.repository(params)
elif entity == 'manifest':
result = ng.manifest(params)
elif entity == 'repository_set':
result = ng.repository_set(params)
elif entity == 'sync_plan':
result = ng.sync_plan(params)
elif entity == 'content_view':
if action == 'publish':
result = ng.publish(params)
elif action == 'promote':
result = ng.promote(params)
else:
result = ng.content_view(params)
elif entity == 'lifecycle_environment':
result = ng.lifecycle_environment(params)
elif entity == 'activation_key':
result = ng.activation_key(params)
else:
module.fail_json(changed=False, result="Unsupported entity supplied")
module.exit_json(changed=result, result="%s updated" % entity)
if __name__ == '__main__':
main()
| gpl-3.0 |
Xeleste/namebench | nb_third_party/dns/rdtypes/ANY/HINFO.py | 248 | 2659 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class HINFO(dns.rdata.Rdata):
"""HINFO record
@ivar cpu: the CPU type
@type cpu: string
@ivar os: the OS type
@type os: string
@see: RFC 1035"""
__slots__ = ['cpu', 'os']
def __init__(self, rdclass, rdtype, cpu, os):
super(HINFO, self).__init__(rdclass, rdtype)
self.cpu = cpu
self.os = os
def to_text(self, origin=None, relativize=True, **kw):
return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
dns.rdata._escapify(self.os))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
cpu = tok.get_string()
os = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, cpu, os)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.cpu)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.cpu)
l = len(self.os)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.os)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
cpu = wire[current : current + l]
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
os = wire[current : current + l]
return cls(rdclass, rdtype, cpu, os)
from_wire = classmethod(from_wire)
def _cmp(self, other):
v = cmp(self.cpu, other.cpu)
if v == 0:
v = cmp(self.os, other.os)
return v
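# Illustrative example (not part of the original module): the text form of
# an HINFO record is two quoted strings, e.g. (constants assumed from
# dns.rdataclass/dns.rdatatype):
#     HINFO(dns.rdataclass.IN, dns.rdatatype.HINFO, 'INTEL-386', 'Linux')
#         .to_text()  ->  '"INTEL-386" "Linux"'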
| apache-2.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/plat-linux2/IN.py | 155 | 13030 | # Generated by h2py from /usr/include/netinet/in.h
_NETINET_IN_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506L
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009L
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from stdint.h
_STDINT_H = 1
# Included from bits/wchar.h
_BITS_WCHAR_H = 1
__WCHAR_MIN = (-2147483647l - 1l)
__WCHAR_MAX = (2147483647l)
# Included from bits/wordsize.h
__WORDSIZE = 32
def __INT64_C(c): return c ## L
def __UINT64_C(c): return c ## UL
def __INT64_C(c): return c ## LL
def __UINT64_C(c): return c ## ULL
INT8_MIN = (-128)
INT16_MIN = (-32767-1)
INT32_MIN = (-2147483647-1)
INT64_MIN = (-__INT64_C(9223372036854775807)-1)
INT8_MAX = (127)
INT16_MAX = (32767)
INT32_MAX = (2147483647)
INT64_MAX = (__INT64_C(9223372036854775807))
UINT8_MAX = (255)
UINT16_MAX = (65535)
UINT64_MAX = (__UINT64_C(18446744073709551615))
INT_LEAST8_MIN = (-128)
INT_LEAST16_MIN = (-32767-1)
INT_LEAST32_MIN = (-2147483647-1)
INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
INT_LEAST8_MAX = (127)
INT_LEAST16_MAX = (32767)
INT_LEAST32_MAX = (2147483647)
INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
UINT_LEAST8_MAX = (255)
UINT_LEAST16_MAX = (65535)
UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
INT_FAST8_MIN = (-128)
INT_FAST16_MIN = (-9223372036854775807L-1)
INT_FAST32_MIN = (-9223372036854775807L-1)
INT_FAST16_MIN = (-2147483647-1)
INT_FAST32_MIN = (-2147483647-1)
INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
INT_FAST8_MAX = (127)
INT_FAST16_MAX = (9223372036854775807L)
INT_FAST32_MAX = (9223372036854775807L)
INT_FAST16_MAX = (2147483647)
INT_FAST32_MAX = (2147483647)
INT_FAST64_MAX = (__INT64_C(9223372036854775807))
UINT_FAST8_MAX = (255)
UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
INTPTR_MIN = (-9223372036854775807L-1)
INTPTR_MAX = (9223372036854775807L)
INTPTR_MIN = (-2147483647-1)
INTPTR_MAX = (2147483647)
INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
INTMAX_MAX = (__INT64_C(9223372036854775807))
UINTMAX_MAX = (__UINT64_C(18446744073709551615))
PTRDIFF_MIN = (-9223372036854775807L-1)
PTRDIFF_MAX = (9223372036854775807L)
PTRDIFF_MIN = (-2147483647-1)
PTRDIFF_MAX = (2147483647)
SIG_ATOMIC_MIN = (-2147483647-1)
SIG_ATOMIC_MAX = (2147483647)
WCHAR_MIN = __WCHAR_MIN
WCHAR_MAX = __WCHAR_MAX
def INT8_C(c): return c
def INT16_C(c): return c
def INT32_C(c): return c
def INT64_C(c): return c ## L
def INT64_C(c): return c ## LL
def UINT8_C(c): return c ## U
def UINT16_C(c): return c ## U
def UINT32_C(c): return c ## U
def UINT64_C(c): return c ## UL
def UINT64_C(c): return c ## ULL
def INTMAX_C(c): return c ## L
def UINTMAX_C(c): return c ## UL
def INTMAX_C(c): return c ## LL
def UINTMAX_C(c): return c ## ULL
# Included from bits/types.h
_BITS_TYPES_H = 1
__FD_SETSIZE = 1024
# Included from bits/pthreadtypes.h
_BITS_PTHREADTYPES_H = 1
# Included from bits/sched.h
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
CSIGNAL = 0x000000ff
CLONE_VM = 0x00000100
CLONE_FS = 0x00000200
CLONE_FILES = 0x00000400
CLONE_SIGHAND = 0x00000800
CLONE_PID = 0x00001000
CLONE_PTRACE = 0x00002000
CLONE_VFORK = 0x00004000
__defined_schedparam = 1
def IN_CLASSA(a): return ((((in_addr_t)(a)) & (-2147483648)) == 0)
IN_CLASSA_NET = (-16777216)
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = ((-1) & ~IN_CLASSA_NET)
IN_CLASSA_MAX = 128
def IN_CLASSB(a): return ((((in_addr_t)(a)) & (-1073741824)) == (-2147483648))
IN_CLASSB_NET = (-65536)
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = ((-1) & ~IN_CLASSB_NET)
IN_CLASSB_MAX = 65536
def IN_CLASSC(a): return ((((in_addr_t)(a)) & (-536870912)) == (-1073741824))
IN_CLASSC_NET = (-256)
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = ((-1) & ~IN_CLASSC_NET)
def IN_CLASSD(a): return ((((in_addr_t)(a)) & (-268435456)) == (-536870912))
def IN_MULTICAST(a): return IN_CLASSD(a)
def IN_EXPERIMENTAL(a): return ((((in_addr_t)(a)) & (-536870912)) == (-536870912))
def IN_BADCLASS(a): return ((((in_addr_t)(a)) & (-268435456)) == (-268435456))
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
INET6_ADDRSTRLEN = 46
# Included from bits/socket.h
# Included from limits.h
_LIBC_LIMITS_H_ = 1
MB_LEN_MAX = 16
_LIMITS_H = 1
CHAR_BIT = 8
SCHAR_MIN = (-128)
SCHAR_MAX = 127
UCHAR_MAX = 255
CHAR_MIN = 0
CHAR_MAX = UCHAR_MAX
CHAR_MIN = SCHAR_MIN
CHAR_MAX = SCHAR_MAX
SHRT_MIN = (-32768)
SHRT_MAX = 32767
USHRT_MAX = 65535
INT_MAX = 2147483647
LONG_MAX = 9223372036854775807L
LONG_MAX = 2147483647L
LONG_MIN = (-LONG_MAX - 1L)
# Included from bits/posix1_lim.h
_BITS_POSIX1_LIM_H = 1
_POSIX_AIO_LISTIO_MAX = 2
_POSIX_AIO_MAX = 1
_POSIX_ARG_MAX = 4096
_POSIX_CHILD_MAX = 6
_POSIX_DELAYTIMER_MAX = 32
_POSIX_LINK_MAX = 8
_POSIX_MAX_CANON = 255
_POSIX_MAX_INPUT = 255
_POSIX_MQ_OPEN_MAX = 8
_POSIX_MQ_PRIO_MAX = 32
_POSIX_NGROUPS_MAX = 0
_POSIX_OPEN_MAX = 16
_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
_POSIX_NAME_MAX = 14
_POSIX_PATH_MAX = 256
_POSIX_PIPE_BUF = 512
_POSIX_RTSIG_MAX = 8
_POSIX_SEM_NSEMS_MAX = 256
_POSIX_SEM_VALUE_MAX = 32767
_POSIX_SIGQUEUE_MAX = 32
_POSIX_SSIZE_MAX = 32767
_POSIX_STREAM_MAX = 8
_POSIX_TZNAME_MAX = 6
_POSIX_QLIMIT = 1
_POSIX_HIWAT = _POSIX_PIPE_BUF
_POSIX_UIO_MAXIOV = 16
_POSIX_TTY_NAME_MAX = 9
_POSIX_TIMER_MAX = 32
_POSIX_LOGIN_NAME_MAX = 9
_POSIX_CLOCKRES_MIN = 20000000
# Included from bits/local_lim.h
# Included from linux/limits.h
NR_OPEN = 1024
NGROUPS_MAX = 32
ARG_MAX = 131072
CHILD_MAX = 999
OPEN_MAX = 256
LINK_MAX = 127
MAX_CANON = 255
MAX_INPUT = 255
NAME_MAX = 255
PATH_MAX = 4096
PIPE_BUF = 4096
RTSIG_MAX = 32
_POSIX_THREAD_KEYS_MAX = 128
PTHREAD_KEYS_MAX = 1024
_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4
PTHREAD_DESTRUCTOR_ITERATIONS = _POSIX_THREAD_DESTRUCTOR_ITERATIONS
_POSIX_THREAD_THREADS_MAX = 64
PTHREAD_THREADS_MAX = 1024
AIO_PRIO_DELTA_MAX = 20
PTHREAD_STACK_MIN = 16384
TIMER_MAX = 256
SSIZE_MAX = LONG_MAX
NGROUPS_MAX = _POSIX_NGROUPS_MAX
# Included from bits/posix2_lim.h
_BITS_POSIX2_LIM_H = 1
_POSIX2_BC_BASE_MAX = 99
_POSIX2_BC_DIM_MAX = 2048
_POSIX2_BC_SCALE_MAX = 99
_POSIX2_BC_STRING_MAX = 1000
_POSIX2_COLL_WEIGHTS_MAX = 2
_POSIX2_EXPR_NEST_MAX = 32
_POSIX2_LINE_MAX = 2048
_POSIX2_RE_DUP_MAX = 255
_POSIX2_CHARCLASS_NAME_MAX = 14
BC_BASE_MAX = _POSIX2_BC_BASE_MAX
BC_DIM_MAX = _POSIX2_BC_DIM_MAX
BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
BC_STRING_MAX = _POSIX2_BC_STRING_MAX
COLL_WEIGHTS_MAX = 255
EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
LINE_MAX = _POSIX2_LINE_MAX
CHARCLASS_NAME_MAX = 2048
RE_DUP_MAX = (0x7fff)
# Included from bits/xopen_lim.h
_XOPEN_LIM_H = 1
# Included from bits/stdio_lim.h
L_tmpnam = 20
TMP_MAX = 238328
FILENAME_MAX = 4096
L_ctermid = 9
L_cuserid = 9
FOPEN_MAX = 16
IOV_MAX = 1024
_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
NL_ARGMAX = _POSIX_ARG_MAX
NL_LANGMAX = _POSIX2_LINE_MAX
NL_MSGMAX = INT_MAX
NL_NMAX = INT_MAX
NL_SETMAX = INT_MAX
NL_TEXTMAX = INT_MAX
NZERO = 20
WORD_BIT = 16
WORD_BIT = 32
WORD_BIT = 64
WORD_BIT = 16
WORD_BIT = 32
WORD_BIT = 64
WORD_BIT = 32
LONG_BIT = 32
LONG_BIT = 64
LONG_BIT = 32
LONG_BIT = 64
LONG_BIT = 64
LONG_BIT = 32
from TYPES import *
PF_UNSPEC = 0
PF_LOCAL = 1
PF_UNIX = PF_LOCAL
PF_FILE = PF_LOCAL
PF_INET = 2
PF_AX25 = 3
PF_IPX = 4
PF_APPLETALK = 5
PF_NETROM = 6
PF_BRIDGE = 7
PF_ATMPVC = 8
PF_X25 = 9
PF_INET6 = 10
PF_ROSE = 11
PF_DECnet = 12
PF_NETBEUI = 13
PF_SECURITY = 14
PF_KEY = 15
PF_NETLINK = 16
PF_ROUTE = PF_NETLINK
PF_PACKET = 17
PF_ASH = 18
PF_ECONET = 19
PF_ATMSVC = 20
PF_SNA = 22
PF_IRDA = 23
PF_PPPOX = 24
PF_WANPIPE = 25
PF_BLUETOOTH = 31
PF_MAX = 32
AF_UNSPEC = PF_UNSPEC
AF_LOCAL = PF_LOCAL
AF_UNIX = PF_UNIX
AF_FILE = PF_FILE
AF_INET = PF_INET
AF_AX25 = PF_AX25
AF_IPX = PF_IPX
AF_APPLETALK = PF_APPLETALK
AF_NETROM = PF_NETROM
AF_BRIDGE = PF_BRIDGE
AF_ATMPVC = PF_ATMPVC
AF_X25 = PF_X25
AF_INET6 = PF_INET6
AF_ROSE = PF_ROSE
AF_DECnet = PF_DECnet
AF_NETBEUI = PF_NETBEUI
AF_SECURITY = PF_SECURITY
AF_KEY = PF_KEY
AF_NETLINK = PF_NETLINK
AF_ROUTE = PF_ROUTE
AF_PACKET = PF_PACKET
AF_ASH = PF_ASH
AF_ECONET = PF_ECONET
AF_ATMSVC = PF_ATMSVC
AF_SNA = PF_SNA
AF_IRDA = PF_IRDA
AF_PPPOX = PF_PPPOX
AF_WANPIPE = PF_WANPIPE
AF_BLUETOOTH = PF_BLUETOOTH
AF_MAX = PF_MAX
SOL_RAW = 255
SOL_DECNET = 261
SOL_X25 = 262
SOL_PACKET = 263
SOL_ATM = 264
SOL_AAL = 265
SOL_IRDA = 266
SOMAXCONN = 128
# Included from bits/sockaddr.h
_BITS_SOCKADDR_H = 1
def __SOCKADDR_COMMON(sa_prefix): return \
_SS_SIZE = 128
def CMSG_FIRSTHDR(mhdr): return \
# Included from asm/socket.h
# Included from asm/sockios.h
FIOSETOWN = 0x8901
SIOCSPGRP = 0x8902
FIOGETOWN = 0x8903
SIOCGPGRP = 0x8904
SIOCATMARK = 0x8905
SIOCGSTAMP = 0x8906
SOL_SOCKET = 1
SO_DEBUG = 1
SO_REUSEADDR = 2
SO_TYPE = 3
SO_ERROR = 4
SO_DONTROUTE = 5
SO_BROADCAST = 6
SO_SNDBUF = 7
SO_RCVBUF = 8
SO_KEEPALIVE = 9
SO_OOBINLINE = 10
SO_NO_CHECK = 11
SO_PRIORITY = 12
SO_LINGER = 13
SO_BSDCOMPAT = 14
SO_PASSCRED = 16
SO_PEERCRED = 17
SO_RCVLOWAT = 18
SO_SNDLOWAT = 19
SO_RCVTIMEO = 20
SO_SNDTIMEO = 21
SO_SECURITY_AUTHENTICATION = 22
SO_SECURITY_ENCRYPTION_TRANSPORT = 23
SO_SECURITY_ENCRYPTION_NETWORK = 24
SO_BINDTODEVICE = 25
SO_ATTACH_FILTER = 26
SO_DETACH_FILTER = 27
SO_PEERNAME = 28
SO_TIMESTAMP = 29
SCM_TIMESTAMP = SO_TIMESTAMP
SO_ACCEPTCONN = 30
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_PACKET = 10
SOCK_MAX = (SOCK_PACKET+1)
# Included from bits/in.h
IP_TOS = 1
IP_TTL = 2
IP_HDRINCL = 3
IP_OPTIONS = 4
IP_ROUTER_ALERT = 5
IP_RECVOPTS = 6
IP_RETOPTS = 7
IP_PKTINFO = 8
IP_PKTOPTIONS = 9
IP_PMTUDISC = 10
IP_MTU_DISCOVER = 10
IP_RECVERR = 11
IP_RECVTTL = 12
IP_RECVTOS = 13
IP_MULTICAST_IF = 32
IP_MULTICAST_TTL = 33
IP_MULTICAST_LOOP = 34
IP_ADD_MEMBERSHIP = 35
IP_DROP_MEMBERSHIP = 36
IP_RECVRETOPTS = IP_RETOPTS
IP_PMTUDISC_DONT = 0
IP_PMTUDISC_WANT = 1
IP_PMTUDISC_DO = 2
SOL_IP = 0
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IPV6_ADDRFORM = 1
IPV6_PKTINFO = 2
IPV6_HOPOPTS = 3
IPV6_DSTOPTS = 4
IPV6_RTHDR = 5
IPV6_PKTOPTIONS = 6
IPV6_CHECKSUM = 7
IPV6_HOPLIMIT = 8
IPV6_NEXTHOP = 9
IPV6_AUTHHDR = 10
IPV6_UNICAST_HOPS = 16
IPV6_MULTICAST_IF = 17
IPV6_MULTICAST_HOPS = 18
IPV6_MULTICAST_LOOP = 19
IPV6_JOIN_GROUP = 20
IPV6_LEAVE_GROUP = 21
IPV6_ROUTER_ALERT = 22
IPV6_MTU_DISCOVER = 23
IPV6_MTU = 24
IPV6_RECVERR = 25
IPV6_RXHOPOPTS = IPV6_HOPOPTS
IPV6_RXDSTOPTS = IPV6_DSTOPTS
IPV6_ADD_MEMBERSHIP = IPV6_JOIN_GROUP
IPV6_DROP_MEMBERSHIP = IPV6_LEAVE_GROUP
IPV6_PMTUDISC_DONT = 0
IPV6_PMTUDISC_WANT = 1
IPV6_PMTUDISC_DO = 2
SOL_IPV6 = 41
SOL_ICMPV6 = 58
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
# Included from endian.h
_ENDIAN_H = 1
__LITTLE_ENDIAN = 1234
__BIG_ENDIAN = 4321
__PDP_ENDIAN = 3412
# Included from bits/endian.h
__BYTE_ORDER = __LITTLE_ENDIAN
__FLOAT_WORD_ORDER = __BYTE_ORDER
LITTLE_ENDIAN = __LITTLE_ENDIAN
BIG_ENDIAN = __BIG_ENDIAN
PDP_ENDIAN = __PDP_ENDIAN
BYTE_ORDER = __BYTE_ORDER
# Included from bits/byteswap.h
_BITS_BYTESWAP_H = 1
def __bswap_constant_16(x): return \
def __bswap_16(x): return \
def __bswap_16(x): return __bswap_constant_16 (x)
def __bswap_constant_32(x): return \
def __bswap_32(x): return \
def __bswap_32(x): return \
def __bswap_32(x): return __bswap_constant_32 (x)
def __bswap_constant_64(x): return \
def __bswap_64(x): return \
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
def ntohl(x): return __bswap_32 (x)
def ntohs(x): return __bswap_16 (x)
def htonl(x): return __bswap_32 (x)
def htons(x): return __bswap_16 (x)
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return
| apache-2.0 |
agualis/test-django-nonrel | django/conf/locale/__init__.py | 157 | 9257 | LANG_INFO = {
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': u'\u0627\u0644\u0639\u0631\u0628\u064a\u0651\u0629',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': u'az\u0259rbaycan dili',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': u'\u09ac\u09be\u0982\u09b2\u09be',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': u'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': u'catal\xe0',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': u'\u010desky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': u'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': u'Dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': u'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': u'\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': u'English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': u'British English',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': u'espa\xf1ol',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': u'espa\xf1ol de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': u'espa\xf1ol de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': u'espa\xf1ol de Nicaragua',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': u'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': u'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': u'\u0641\u0627\u0631\u0633\u06cc',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': u'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': u'Fran\xe7ais',
},
'fy-nl': {
'bidi': False,
'code': 'fy-nl',
'name': 'Frisian',
'name_local': u'Frisian',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': u'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': u'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': u'\u05e2\u05d1\u05e8\u05d9\u05ea',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': u'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': u'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': u'Magyar',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': u'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': u'\xcdslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': u'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': u'\u65e5\u672c\u8a9e',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': u'\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': u'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': u'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': u'\ud55c\uad6d\uc5b4',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': u'Lithuanian',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': u'latvie\u0161u',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': u'\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': u'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': u'Mongolian',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': u'Norsk (bokm\xe5l)',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': u'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': u'Norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': u'Norsk',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': u'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': u'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': u'Portugu\xeas',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': u'Portugu\xeas Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': u'Rom\xe2n\u0103',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': u'\u0420\u0443\u0441\u0441\u043a\u0438\u0439',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': u'slovensk\xfd',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': u'Sloven\u0161\u010dina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': u'Albanian',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': u'\u0441\u0440\u043f\u0441\u043a\u0438',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': u'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': u'Svenska',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': u'\u0ba4\u0bae\u0bbf\u0bb4\u0bcd',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': u'\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': u'Thai',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': u'T\xfcrk\xe7e',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430',
},
'ur': {
'bidi': False,
'code': 'ur',
'name': 'Urdu',
'name_local': u'\u0627\u0631\u062f\u0648',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': u'Vietnamese',
},
'zh-cn': {
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': u'\u7b80\u4f53\u4e2d\u6587',
},
'zh-tw': {
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': u'\u7e41\u9ad4\u4e2d\u6587',
}
}
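# Illustrative examples (not part of the original module): entries are keyed
# by lowercase language code:
#     LANG_INFO['pt-br']['name_local']  ->  u'Portugu\xeas Brasileiro'
#     LANG_INFO['he']['bidi']           ->  True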
| bsd-3-clause |
camomile-project/camomile-server | test/__init__.py | 1 | 2276 | from __future__ import unicode_literals
import tempfile
import subprocess
import pymongo
import shutil
import time
import sys
from camomile import Camomile
URL = 'http://localhost:3000'
CLIENT = Camomile(URL, debug=False)
MONGO_DIR = None
MONGO_PROCESS = None
NODE_PROCESS = None
ROOT_USERNAME = 'root'
ROOT_PASSWORD = 'password'
def setup():
global MONGO_DIR
global MONGO_PROCESS
global NODE_PROCESS
global CLIENT
# create MONGO_DIR
MONGO_DIR = tempfile.mkdtemp()
print(MONGO_DIR)
# launch MongoDB
sys.stdout.write('Running MongoDB instance... ')
sys.stdout.flush()
cmd = ['mongod', '--dbpath={dbpath:s}'.format(dbpath=MONGO_DIR)]
MONGO_PROCESS = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# testing MongoDB
client = pymongo.MongoClient('localhost', 27017)
try:
# will block until MongoDB is ready
client.database_names()
except Exception:
MONGO_PROCESS.kill()
assert False, 'Cannot connect to the MongoDB instance.'
sys.stdout.write('DONE\n')
sys.stdout.flush()
# launch API
sys.stdout.write('Running Camomile test instance... ')
sys.stdout.flush()
cmd = ['node', 'app.js', '--root-password', ROOT_PASSWORD, '--mongodb-host', 'localhost', '--mongodb-port', '27017']
NODE_PROCESS = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# testing if server is running
for i in range(15):
time.sleep(1)
try:
Camomile(URL, username=ROOT_USERNAME, password=ROOT_PASSWORD)
except Exception:
continue
else:
break
else:
NODE_PROCESS.kill()
assert False, 'Cannot connect to the Camomile test instance.'
sys.stdout.write('DONE\n')
sys.stdout.flush()
def teardown():
global MONGO_DIR
global MONGO_PROCESS
global NODE_PROCESS
# stop API
sys.stdout.write('\nKilling Camomile test instance... ')
sys.stdout.flush()
NODE_PROCESS.kill()
sys.stdout.write('DONE\n')
sys.stdout.flush()
# stop MongoDB
sys.stdout.write('Killing MongoDB instance... ')
sys.stdout.flush()
MONGO_PROCESS.kill()
sys.stdout.write('DONE\n')
sys.stdout.flush()
# delete MONGO_DIR
shutil.rmtree(MONGO_DIR)
| mit |
kumarshubham/xhtml2pdf | xhtml2pdf/__init__.py | 41 | 1566 | # -*- coding: utf-8 -*-
import logging
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 238 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-06-26 20:06:02 +0200 (Do, 26 Jun 2008) $"
REQUIRED_INFO = """
****************************************************
IMPORT ERROR!
%s
****************************************************
The following Python packages are required for PISA:
- Reportlab Toolkit >= 2.2 <http://www.reportlab.org/>
- HTML5lib >= 0.11.1 <http://code.google.com/p/html5lib/>
Optional packages:
- pyPDF <http://pybrary.net/pyPdf/>
- PIL <http://www.pythonware.com/products/pil/>
""".lstrip()
log = logging.getLogger(__name__)
try:
from xhtml2pdf.util import REPORTLAB22
if not REPORTLAB22:
raise ImportError, "Reportlab Toolkit Version 2.2 or higher needed"
except ImportError, e:
import sys
sys.stderr.write(REQUIRED_INFO % e)
log.error(REQUIRED_INFO % e)
raise
from xhtml2pdf.version import VERSION
__version__ = VERSION
| apache-2.0 |
kumy/geokrety-api | tests/unittests/utils/responses/base.py | 2 | 10902 | # -*- coding: utf-8 -*-
import pprint
from datetime import datetime
class BaseResponse(dict):
class assertRaises:
def __init__(self, expected, expected_regexp=None):
self.expected = expected
self.failureException = AssertionError
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb): # pragma: no cover
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
def __init__(self, data):
if data is None:
self.update({})
elif data.get('data'):
self.update(data['data'])
else:
self.update(data)
if data is not None and data.get('included'):
self['included'] = data['included']
@property
def id(self):
assert 'id' in self
try:
return int(self['id'])
except AssertionError: # pragma: no cover
raise AttributeError("Object id not found in response.")
@property
def created_on_datetime(self):
return self._format_datetime(self.get_attribute('created-on-datetime'))
def _format_datetime(self, date_time):
return datetime.strptime(date_time, '%Y-%m-%dT%H:%M:%S')
def get_attribute(self, attribute):
attribute = attribute.replace('_', '-')
assert 'attributes' in self
assert attribute in self['attributes'], attribute
try:
return self['attributes'][attribute]
except AssertionError: # pragma: no cover
raise AttributeError("Attribute '%s' not found in response." % attribute)
def _get_relationships(self, relationships):
relationships = relationships.replace('_', '-')
assert 'relationships' in self
assert relationships in self['relationships'], relationships
return self['relationships'][relationships]
def assertDataIsNone(self):
"""Assert the data attribute is None
"""
assert 'data' in self
assert self['data'] is None, self['data']
def assertHasId(self, value):
"""Assert the ID has value
"""
assert str(self.id) == str(value), "Expected id to be '{}' but was '{}'".format(value, self.id)
return self
def assertHasRelationshipRelated(self, relation_type, link):
"""Assert an error response has a specific pointer
"""
relation_type = relation_type.replace('_', '-')
assert 'relationships' in self
assert relation_type in self['relationships']
assert 'links' in self['relationships'][relation_type]
assert 'related' in self['relationships'][relation_type]['links']
try:
assert link in self['relationships'][relation_type]['links']['related']
except AssertionError: # pragma: no cover
raise AttributeError(
"assert '%s' in self['relationships']['%s']['links']['related']" % (link, relation_type))
return self
def assertHasRelationshipSelf(self, relation_type, link):
"""Assert an error response has a specific pointer
"""
relation_type = relation_type.replace('_', '-')
assert 'relationships' in self
assert relation_type in self['relationships'], relation_type
assert 'links' in self['relationships'][relation_type], relation_type
assert 'self' in self['relationships'][relation_type]['links'], relation_type
assert link in self['relationships'][relation_type]['links']['self'], link
return self
def assertHasAttribute(self, attribute, value):
"""Assert a response attribute has a specific value
"""
try:
assert self.get_attribute(attribute) == value
except AssertionError: # pragma: no cover
raise AttributeError("Attribute '%s' value '%s' not the expected one (%s)." %
(attribute, self.get_attribute(attribute), value))
return self
def assertAttributeNotPresent(self, attribute):
"""Assert a response doesn't contains an attribute
"""
try:
self.get_attribute(attribute)
raise AttributeError("Attribute '{}' was not found in response but we don't expect it.".format(attribute)) # pragma: no cover
except AssertionError:
pass
return self
def assertNotHasAttribute(self, attribute, value):
"""Assert a response attribute equals a specific value
"""
try:
assert self.get_attribute(attribute) != value
except AssertionError: # pragma: no cover
raise AttributeError("Attribute '%s' value '%s' is expected to be different then '%s'." %
(attribute, self.get_attribute(attribute), value))
return self
def assertHasRelationshipData(self, relationships, value, obj_type):
"""Assert a response relation has a specific value
"""
rel = self._get_relationships(relationships)
        if value is None:  # pragma: no cover
            assert rel['data'] is None
            return self
        assert rel['data'] is not None
try:
assert 'id' in rel['data']
assert rel['data']['id'] == str(value)
except AssertionError: # pragma: no cover
raise AttributeError("Relationships '%s' value should be '%s' but was '%s'." %
(relationships, value, rel['data']['id']))
try:
assert 'type' in rel['data']
assert rel['data']['type'] == obj_type
except AssertionError: # pragma: no cover
raise AttributeError("Relationships '%s' should be '%s' but was '%s'." %
(relationships, obj_type, rel['data']['type']))
return self
def assertHasRelationshipDatas(self, relationships, values, obj_type):
"""Assert a response relation has specific values
"""
rel = self._get_relationships(relationships)
        if values is None:  # pragma: no cover
            assert rel['data'] is None, rel
            return self
        assert rel['data'] is not None, rel
str_values = [str(value.id) for value in values]
found_ids = []
try:
# returned data in expected list
for data in rel['data']:
assert 'id' in data
assert data['id'] in str_values, data['id']
assert 'type' in data
assert data['type'] == obj_type
found_ids.append(data['id'])
# expect to find all expected values in response
for value in str_values:
assert value in found_ids
except AssertionError: # pragma: no cover
raise AttributeError("Included relationships '%s' not found in response, expected %s, found %s." % (
relationships, str_values, rel['data']))
return self
def assertHasData(self, obj_type, value):
"""Assert a response has a specific data value
"""
assert 'type' in self, "'type' key not found in 'data'"
assert 'id' in self, "'id' key not found in 'data'"
assert self['type'] == obj_type, "type '{}' expected but found '{}'".format(obj_type, self['type'])
assert self['id'] == value, "id '{}' expected but found '{}'".format(value, self['id'])
return self
def assertCreationDateTime(self):
self.assertDateTimePresent('created-on-datetime')
return self
def assertUpdatedDateTime(self):
self.assertDateTimePresent('updated-on-datetime')
return self
def assertHasAttributeDateTimeOrNone(self, attribute, date_time):
        if date_time is None:
            return self
self.assertHasAttributeDateTime(attribute, date_time)
return self
def assertHasAttributeDateTime(self, attribute, date_time):
self.assertDateTimePresent(attribute)
if isinstance(date_time, datetime):
date_time = date_time.strftime("%Y-%m-%dT%H:%M:%S")
assert self.get_attribute(attribute)[:-1] == date_time[:-1]
return self
def assertDateTimePresent(self, attribute):
        # Use a distinct name so we don't shadow the imported `datetime` class.
        date_time = self.get_attribute(attribute)
        self.assertIsDateTime(date_time)
return self
def assertIsDateTime(self, date_time):
if isinstance(date_time, datetime): # pragma: no cover
return self
try:
datetime.strptime(date_time, "%Y-%m-%dT%H:%M:%S")
except ValueError: # pragma: no cover
assert False, 'Date is not parsable (%s)' % date_time
return self
def assertRaiseJsonApiError(self, pointer):
"""Assert an error response has a specific pointer
"""
assert 'errors' in self
for error in self['errors']:
assert 'source' in error
assert 'pointer' in error['source']
if pointer in error['source']['pointer']:
return self
assert False, "JsonApiError pointer '{}' not raised".format(pointer) # pragma: no cover
def assertJsonApiErrorCount(self, count):
"""Assert an error response has a specific number of entries
"""
assert 'errors' in self, "No error found but we expect to see {}".format(count)
        assert len(self['errors']) == count, "Expected to find {} errors, but found {}" \
            .format(count, len(self['errors']))
return self
def assertDateTimeAlmostEqual(self, first, second, delta=1):
""" Compare two datetime attributes, accept maximum difference
of `delta` seconds.
"""
first_attribute = datetime.strptime(self.get_attribute(first), "%Y-%m-%dT%H:%M:%S")
second_attribute = datetime.strptime(self.get_attribute(second), "%Y-%m-%dT%H:%M:%S")
computed_delta = (first_attribute - second_attribute).seconds
assert computed_delta == 0 or computed_delta == 1
def pprint(self): # pragma: no cover
pprint.pprint(self)
return self
| gpl-3.0 |
Svolcano/python_exercise | WinLsLoad/lib/msg/Serialize.py | 1 | 1404 | import json
import logging
from ..cipher.Cipher import Cipher
logger = logging.getLogger(__name__)
class Serialize():
def __init__(self):
pass
def serialize(self, content_obj):
'''
        Encode content_obj to a string, then encrypt that string.
for example:
content_obj = {'a':'apple','b':'banana'}
encode to string, then encrypt it, return encrypted string.
'''
msg = json.dumps(content_obj)
c = Cipher()
cmsg = c.encrypt(msg)
return cmsg
def unserialize(self, content_str):
'''
        Decrypt content_str, decode it to a JSON object, and return the object.
'''
        if len(content_str) % 16 != 0:
            # Encrypted payloads are expected in whole 16-byte blocks.
            logger.info('length is not a multiple of 16 bytes.')
            return {}
c = Cipher()
msg = c.decrypt(content_str)
        try:
            obj = json.loads(msg)
        except ValueError:
            logger.info('json loads error')
            logger.info(":".join(hex(ord(c)) for c in msg))
return {}
return obj
if __name__ == '__main__':
s = Serialize()
a = {'a':'apple','b':'banana'}
print a
b = s.serialize(a)
a = 'ddd'
b = s.serialize(a)
print b
c = s.unserialize(b)
print c
b = json.dumps(a)
print b
b = '1234567887654321'
c = s.unserialize(b)
print c
if {} == c:
print 'null dict'
| mit |
gibiansky/tensorflow | tensorflow/python/summary/event_multiplexer.py | 24 | 13976 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
  x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2': ...})
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@__init__
@@AddRun
@@AddRunsFromDirectory
@@Reload
@@Runs
@@RunPaths
@@Scalars
@@Graph
@@MetaGraph
@@Histograms
@@CompressedHistograms
@@Images
@@Audio
"""
def __init__(self,
run_path_map=None,
size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
logging.info('Event Multiplexer initializing.')
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = size_guidance
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
      logging.info('Event Multiplexer doing initialization load for %s',
                   run_path_map)
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
logging.info('Event Multiplexer done initializing')
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
    if not name:
name = path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(danmane) - Make it impossible to overwrite an old path with
# a new path (just give the new path a distinct name)
logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
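  # A short usage sketch (the log path is hypothetical):
  #
  #     multiplexer = EventMultiplexer()
  #     multiplexer.AddRun('/tmp/logs/run1', name='run1')
  #     multiplexer.Reload()
  #     print(multiplexer.Runs())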
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
    recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
is added called "name" and with the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
logging.info('Starting AddRunsFromDirectory: %s', path)
for subdir in GetLogdirSubdirectories(path):
logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
logging.info('Done with AddRunsFromDirectory: %s', path)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
logging.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
# Build a list so we're safe even if the list of accumulators is modified
# even while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
names_to_delete = set()
for name, accumulator in items:
try:
accumulator.Reload()
except (OSError, IOError) as e:
logging.error("Unable to reload accumulator '%s': %s", name, e)
except directory_watcher.DirectoryDeletedError:
names_to_delete.add(name)
with self._accumulators_mutex:
for name in names_to_delete:
logging.warning("Deleting accumulator '%s'", name)
del self._accumulators[name]
logging.info('Finished with EventMultiplexer.Reload()')
return self
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first event of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
Returns:
The wall_time of the first event of the run, which will typically be
seconds since the epoch.
Raises:
KeyError: If the run is not found.
ValueError: If the run has no events loaded and there are no events on
disk to load.
"""
accumulator = self._GetAccumulator(run)
return accumulator.FirstEventTimestamp()
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Scalars(tag)
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `GraphDef` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Graph()
def MetaGraph(self, run):
"""Retrieve the metagraph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `MetaGraphDef` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.MetaGraph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Histograms(tag)
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Images(tag)
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Audio(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { images: [tag1, tag2, tag3],
scalarValues: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
compressedHistograms: [tagX, tagY, tagZ],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def RunPaths(self):
"""Returns a dict mapping run names to event file paths."""
return self._paths
def _GetAccumulator(self, run):
with self._accumulators_mutex:
return self._accumulators[run]
def GetLogdirSubdirectories(path):
"""Returns subdirectories with event files on path."""
if gfile.Exists(path) and not gfile.IsDirectory(path):
raise ValueError('GetLogdirSubdirectories: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
return (
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
)
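# For example, with the layout from the EventMultiplexer docstring above,
# GetLogdirSubdirectories('/parent/directory/path') would yield the run1,
# run2 and run3 subdirectories, since each contains an events file.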
| apache-2.0 |
UK992/servo | tests/wpt/web-platform-tests/tools/third_party/webencodings/webencodings/x_user_defined.py | 171 | 4307 | # coding: utf-8
"""
webencodings.x_user_defined
~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the x-user-defined encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
codec_info = codecs.CodecInfo(
name='x-user-defined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
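# A minimal usage sketch (assumes the usual `webencodings` package layout;
# registering a search function is one way to expose the codec by name):
#
#     import codecs
#     from webencodings import x_user_defined
#
#     codecs.register(lambda name: x_user_defined.codec_info
#                     if name == 'x-user-defined' else None)
#     assert b'\x80'.decode('x-user-defined') == '\uf780'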
### Decoding Table
# Python 3:
# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700))
decoding_table = (
'\x00'
'\x01'
'\x02'
'\x03'
'\x04'
'\x05'
'\x06'
'\x07'
'\x08'
'\t'
'\n'
'\x0b'
'\x0c'
'\r'
'\x0e'
'\x0f'
'\x10'
'\x11'
'\x12'
'\x13'
'\x14'
'\x15'
'\x16'
'\x17'
'\x18'
'\x19'
'\x1a'
'\x1b'
'\x1c'
'\x1d'
'\x1e'
'\x1f'
' '
'!'
'"'
'#'
'$'
'%'
'&'
"'"
'('
')'
'*'
'+'
','
'-'
'.'
'/'
'0'
'1'
'2'
'3'
'4'
'5'
'6'
'7'
'8'
'9'
':'
';'
'<'
'='
'>'
'?'
'@'
'A'
'B'
'C'
'D'
'E'
'F'
'G'
'H'
'I'
'J'
'K'
'L'
'M'
'N'
'O'
'P'
'Q'
'R'
'S'
'T'
'U'
'V'
'W'
'X'
'Y'
'Z'
'['
'\\'
']'
'^'
'_'
'`'
'a'
'b'
'c'
'd'
'e'
'f'
'g'
'h'
'i'
'j'
'k'
'l'
'm'
'n'
'o'
'p'
'q'
'r'
's'
't'
'u'
'v'
'w'
'x'
'y'
'z'
'{'
'|'
'}'
'~'
'\x7f'
'\uf780'
'\uf781'
'\uf782'
'\uf783'
'\uf784'
'\uf785'
'\uf786'
'\uf787'
'\uf788'
'\uf789'
'\uf78a'
'\uf78b'
'\uf78c'
'\uf78d'
'\uf78e'
'\uf78f'
'\uf790'
'\uf791'
'\uf792'
'\uf793'
'\uf794'
'\uf795'
'\uf796'
'\uf797'
'\uf798'
'\uf799'
'\uf79a'
'\uf79b'
'\uf79c'
'\uf79d'
'\uf79e'
'\uf79f'
'\uf7a0'
'\uf7a1'
'\uf7a2'
'\uf7a3'
'\uf7a4'
'\uf7a5'
'\uf7a6'
'\uf7a7'
'\uf7a8'
'\uf7a9'
'\uf7aa'
'\uf7ab'
'\uf7ac'
'\uf7ad'
'\uf7ae'
'\uf7af'
'\uf7b0'
'\uf7b1'
'\uf7b2'
'\uf7b3'
'\uf7b4'
'\uf7b5'
'\uf7b6'
'\uf7b7'
'\uf7b8'
'\uf7b9'
'\uf7ba'
'\uf7bb'
'\uf7bc'
'\uf7bd'
'\uf7be'
'\uf7bf'
'\uf7c0'
'\uf7c1'
'\uf7c2'
'\uf7c3'
'\uf7c4'
'\uf7c5'
'\uf7c6'
'\uf7c7'
'\uf7c8'
'\uf7c9'
'\uf7ca'
'\uf7cb'
'\uf7cc'
'\uf7cd'
'\uf7ce'
'\uf7cf'
'\uf7d0'
'\uf7d1'
'\uf7d2'
'\uf7d3'
'\uf7d4'
'\uf7d5'
'\uf7d6'
'\uf7d7'
'\uf7d8'
'\uf7d9'
'\uf7da'
'\uf7db'
'\uf7dc'
'\uf7dd'
'\uf7de'
'\uf7df'
'\uf7e0'
'\uf7e1'
'\uf7e2'
'\uf7e3'
'\uf7e4'
'\uf7e5'
'\uf7e6'
'\uf7e7'
'\uf7e8'
'\uf7e9'
'\uf7ea'
'\uf7eb'
'\uf7ec'
'\uf7ed'
'\uf7ee'
'\uf7ef'
'\uf7f0'
'\uf7f1'
'\uf7f2'
'\uf7f3'
'\uf7f4'
'\uf7f5'
'\uf7f6'
'\uf7f7'
'\uf7f8'
'\uf7f9'
'\uf7fa'
'\uf7fb'
'\uf7fc'
'\uf7fd'
'\uf7fe'
'\uf7ff'
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
| mpl-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/gis/gdal/geometries.py | 388 | 26357 | """
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/ogr/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5, 0 0))'
>>> pnt = OGRGeometry(wkt1)
>>> print pnt
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print mpnt
MULTIPOINT (-90 30,-90 30)
>>> print mpnt.srs.name
WGS 84
>>> print mpnt.srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform_to(SpatialReference('NAD27'))
>>> print mpnt.proj
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print mpnt
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print gt1 == 3, gt1 == 'Polygon' # Equivalence works w/non-OGRGeomType objects
True
"""
# Python library requisites.
import sys
from binascii import a2b_hex
from ctypes import byref, string_at, c_char_p, c_double, c_ubyte, c_void_p
# Getting GDAL prerequisites
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.libgdal import GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
# Getting the ctypes prototype functions that interface w/the GDAL C library.
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
#### OGRGeometry Class ####
class OGRGeometry(GDALBase):
"Generally encapsulates an OGR geometry."
def __init__(self, geom_input, srs=None):
"Initializes Geometry on either WKT or an OGR pointer as input."
str_instance = isinstance(geom_input, basestring)
        # If HEX, unpack input to a binary buffer.
if str_instance and hex_regex.match(geom_input):
geom_input = buffer(a2b_hex(geom_input.upper()))
str_instance = False
# Constructing the geometry,
if str_instance:
# Checking if unicode
if isinstance(geom_input, unicode):
# Encoding to ASCII, WKT or HEX doesn't need any more.
geom_input = geom_input.encode('ascii')
wkt_m = wkt_regex.match(geom_input)
json_m = json_regex.match(geom_input)
if wkt_m:
if wkt_m.group('srid'):
# If there's EWKT, set the SRS w/value of the SRID.
srs = int(wkt_m.group('srid'))
if wkt_m.group('type').upper() == 'LINEARRING':
# OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
# See http://trac.osgeo.org/gdal/ticket/1992.
g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt'))))
else:
g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt'))), None, byref(c_void_p()))
elif json_m:
if GEOJSON:
g = capi.from_json(geom_input)
else:
raise NotImplementedError('GeoJSON input only supported on GDAL 1.5+.')
else:
# Seeing if the input is a valid short-hand string
# (e.g., 'Point', 'POLYGON').
ogr_t = OGRGeomType(geom_input)
g = capi.create_geom(OGRGeomType(geom_input).num)
elif isinstance(geom_input, buffer):
# WKB was passed in
g = capi.from_wkb(str(geom_input), None, byref(c_void_p()), len(geom_input))
elif isinstance(geom_input, OGRGeomType):
# OGRGeomType was passed in, an empty geometry will be created.
g = capi.create_geom(geom_input.num)
elif isinstance(geom_input, self.ptr_type):
# OGR pointer (c_void_p) was the input.
g = geom_input
else:
raise OGRException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
# Now checking the Geometry pointer before finishing initialization
# by setting the pointer for the object.
if not g:
raise OGRException('Cannot create OGR Geometry from input: %s' % str(geom_input))
self.ptr = g
# Assigning the SpatialReference object to the geometry, if valid.
if bool(srs): self.srs = srs
# Setting the class depending upon the OGR Geometry Type
self.__class__ = GEO_CLASSES[self.geom_type.num]
def __del__(self):
"Deletes this Geometry."
if self._ptr: capi.destroy_geom(self._ptr)
# Pickle routines
def __getstate__(self):
srs = self.srs
if srs:
srs = srs.wkt
else:
srs = None
return str(self.wkb), srs
def __setstate__(self, state):
wkb, srs = state
ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
if not ptr: raise OGRException('Invalid OGRGeometry loaded from pickled state.')
self.ptr = ptr
self.srs = srs
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return OGRGeometry( 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0) )
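    # A quick sketch of `from_bbox` (coordinates are arbitrary; exact WKT
    # formatting depends on the GDAL version):
    #
    #     poly = OGRGeometry.from_bbox((0, 0, 5, 5))
    #     poly.geom_name   # 'POLYGON'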
### Geometry set-like operations ###
# g = g1 | g2
def __or__(self, other):
"Returns the union of the two geometries."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
def __eq__(self, other):
"Is this Geometry equal to the other?"
if isinstance(other, OGRGeometry):
return self.equals(other)
else:
return False
def __ne__(self, other):
"Tests for inequality."
return not (self == other)
def __str__(self):
"WKT is used for the string representation."
return self.wkt
#### Geometry Properties ####
@property
def dimension(self):
"Returns 0 for points, 1 for lines, and 2 for surfaces."
return capi.get_dims(self.ptr)
def _get_coord_dim(self):
"Returns the coordinate dimension of the Geometry."
if isinstance(self, GeometryCollection) and GDAL_VERSION < (1, 5, 2):
# On GDAL versions prior to 1.5.2, there exists a bug in which
# the coordinate dimension of geometry collections is always 2:
# http://trac.osgeo.org/gdal/ticket/2334
# Here we workaround by returning the coordinate dimension of the
# first geometry in the collection instead.
if len(self):
return capi.get_coord_dim(capi.get_geom_ref(self.ptr, 0))
return capi.get_coord_dim(self.ptr)
def _set_coord_dim(self, dim):
"Sets the coordinate dimension of this Geometry."
if not dim in (2, 3):
raise ValueError('Geometry dimension must be either 2 or 3')
capi.set_coord_dim(self.ptr, dim)
coord_dim = property(_get_coord_dim, _set_coord_dim)
@property
def geom_count(self):
"The number of elements in this Geometry."
return capi.get_geom_count(self.ptr)
@property
def point_count(self):
"Returns the number of Points in this Geometry."
return capi.get_point_count(self.ptr)
@property
def num_points(self):
"Alias for `point_count` (same name method in GEOS API.)"
return self.point_count
@property
def num_coords(self):
"Alais for `point_count`."
return self.point_count
@property
def geom_type(self):
"Returns the Type for this Geometry."
return OGRGeomType(capi.get_geom_type(self.ptr))
@property
def geom_name(self):
"Returns the Name of this Geometry."
return capi.get_geom_name(self.ptr)
@property
def area(self):
"Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
return capi.get_area(self.ptr)
@property
def envelope(self):
"Returns the envelope for this Geometry."
# TODO: Fix Envelope() for Point geometries.
return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
@property
def extent(self):
"Returns the envelope as a 4-tuple, instead of as an Envelope object."
return self.envelope.tuple
#### SpatialReference-related Properties ####
# The SRS property
def _get_srs(self):
"Returns the Spatial Reference for this Geometry."
try:
srs_ptr = capi.get_geom_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(srs_ptr))
except SRSException:
return None
def _set_srs(self, srs):
"Sets the SpatialReference for this geometry."
# Do not have to clone the `SpatialReference` object pointer because
        # when it is assigned to this `OGRGeometry` its internal OGR
# reference count is incremented, and will likewise be released
# (decremented) when this geometry's destructor is called.
if isinstance(srs, SpatialReference):
srs_ptr = srs.ptr
elif isinstance(srs, (int, long, basestring)):
sr = SpatialReference(srs)
srs_ptr = sr.ptr
else:
raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
capi.assign_srs(self.ptr, srs_ptr)
srs = property(_get_srs, _set_srs)
# The SRID property
def _get_srid(self):
srs = self.srs
if srs: return srs.srid
return None
def _set_srid(self, srid):
if isinstance(srid, (int, long)):
self.srs = srid
else:
raise TypeError('SRID must be set with an integer.')
srid = property(_get_srid, _set_srid)
#### Output Methods ####
@property
def geos(self):
"Returns a GEOSGeometry object from this OGRGeometry."
from django.contrib.gis.geos import GEOSGeometry
return GEOSGeometry(self.wkb, self.srid)
@property
def gml(self):
"Returns the GML representation of the Geometry."
return capi.to_gml(self.ptr)
@property
def hex(self):
"Returns the hexadecimal representation of the WKB (a string)."
return str(self.wkb).encode('hex').upper()
#return b2a_hex(self.wkb).upper()
@property
def json(self):
"""
Returns the GeoJSON representation of this Geometry (requires
GDAL 1.5+).
"""
if GEOJSON:
return capi.to_json(self.ptr)
else:
raise NotImplementedError('GeoJSON output only supported on GDAL 1.5+.')
geojson = json
@property
def kml(self):
"Returns the KML representation of the Geometry."
if GEOJSON:
return capi.to_kml(self.ptr, None)
else:
raise NotImplementedError('KML output only supported on GDAL 1.5+.')
@property
def wkb_size(self):
"Returns the size of the WKB buffer."
return capi.get_wkbsize(self.ptr)
@property
def wkb(self):
"Returns the WKB representation of the Geometry."
if sys.byteorder == 'little':
byteorder = 1 # wkbNDR (from ogr_core.h)
else:
byteorder = 0 # wkbXDR
sz = self.wkb_size
# Creating the unsigned character buffer, and passing it in by reference.
buf = (c_ubyte * sz)()
wkb = capi.to_wkb(self.ptr, byteorder, byref(buf))
# Returning a buffer of the string at the pointer.
return buffer(string_at(buf, sz))
@property
def wkt(self):
"Returns the WKT representation of the Geometry."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def ewkt(self):
"Returns the EWKT representation of the Geometry."
srs = self.srs
if srs and srs.srid:
return 'SRID=%s;%s' % (srs.srid, self.wkt)
else:
return self.wkt
#### Geometry Methods ####
def clone(self):
"Clones this OGR Geometry."
return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
def close_rings(self):
"""
If there are any rings within this geometry that have not been
closed, this routine will do so by adding the starting point at the
end.
"""
# Closing the open rings.
capi.geom_close_rings(self.ptr)
def transform(self, coord_trans, clone=False):
"""
Transforms this geometry to a different spatial reference system.
May take a CoordTransform object, a SpatialReference object, string
WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
and the geometry is transformed in-place. However, if the `clone`
keyword is set, then a transformed clone of this geometry will be
returned.
"""
if clone:
klone = self.clone()
klone.transform(coord_trans)
return klone
# Have to get the coordinate dimension of the original geometry
# so it can be used to reset the transformed geometry's dimension
# afterwards. This is done because of GDAL bug (in versions prior
# to 1.7) that turns geometries 3D after transformation, see:
# http://trac.osgeo.org/gdal/changeset/17792
if GDAL_VERSION < (1, 7):
orig_dim = self.coord_dim
# Depending on the input type, use the appropriate OGR routine
# to perform the transformation.
if isinstance(coord_trans, CoordTransform):
capi.geom_transform(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, SpatialReference):
capi.geom_transform_to(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, (int, long, basestring)):
sr = SpatialReference(coord_trans)
capi.geom_transform_to(self.ptr, sr.ptr)
else:
raise TypeError('Transform only accepts CoordTransform, '
'SpatialReference, string, and integer objects.')
# Setting with original dimension, see comment above.
if GDAL_VERSION < (1, 7):
if isinstance(self, GeometryCollection):
# With geometry collections have to set dimension on
# each internal geometry reference, as the collection
# dimension isn't affected.
for i in xrange(len(self)):
internal_ptr = capi.get_geom_ref(self.ptr, i)
if orig_dim != capi.get_coord_dim(internal_ptr):
capi.set_coord_dim(internal_ptr, orig_dim)
else:
if self.coord_dim != orig_dim:
self.coord_dim = orig_dim
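    # A short sketch of in-place vs. cloned transforms (4326 and 3857 are
    # the usual EPSG codes for WGS84 and spherical Mercator):
    #
    #     geom = OGRGeometry('POINT(-90 30)', srs=4326)
    #     merc = geom.transform(3857, clone=True)  # original left untouched
    #     geom.transform(3857)                     # transformed in place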
def transform_to(self, srs):
"For backwards-compatibility."
self.transform(srs)
#### Topology Methods ####
def _topology(self, func, other):
"""A generalized function for topology operations, takes a GDAL function and
the other geometry to perform the operation on."""
if not isinstance(other, OGRGeometry):
raise TypeError('Must use another OGRGeometry object for topology operations!')
# Returning the output of the given function with the other geometry's
# pointer.
return func(self.ptr, other.ptr)
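    # For example (geometries are arbitrary):
    #
    #     a = OGRGeometry('POINT(0 0)')
    #     b = OGRGeometry('LINESTRING(0 0, 1 1)')
    #     a.intersects(b)  # True -- the point lies on the line
    #     a.disjoint(b)    # False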
def intersects(self, other):
"Returns True if this geometry intersects with the other."
return self._topology(capi.ogr_intersects, other)
def equals(self, other):
"Returns True if this geometry is equivalent to the other."
return self._topology(capi.ogr_equals, other)
def disjoint(self, other):
"Returns True if this geometry and the other are spatially disjoint."
return self._topology(capi.ogr_disjoint, other)
def touches(self, other):
"Returns True if this geometry touches the other."
return self._topology(capi.ogr_touches, other)
def crosses(self, other):
"Returns True if this geometry crosses the other."
return self._topology(capi.ogr_crosses, other)
def within(self, other):
"Returns True if this geometry is within the other."
return self._topology(capi.ogr_within, other)
def contains(self, other):
"Returns True if this geometry contains the other."
return self._topology(capi.ogr_contains, other)
def overlaps(self, other):
"Returns True if this geometry overlaps the other."
return self._topology(capi.ogr_overlaps, other)
#### Geometry-generation Methods ####
def _geomgen(self, gen_func, other=None):
"A helper routine for the OGR routines that generate geometries."
if isinstance(other, OGRGeometry):
return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
else:
return OGRGeometry(gen_func(self.ptr), self.srs)
@property
def boundary(self):
"Returns the boundary of this geometry."
return self._geomgen(capi.get_boundary)
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points in
this Geometry.
"""
return self._geomgen(capi.geom_convex_hull)
def difference(self, other):
"""
Returns a new geometry consisting of the region which is the difference
of this geometry and the other.
"""
return self._geomgen(capi.geom_diff, other)
def intersection(self, other):
"""
Returns a new geometry consisting of the region of intersection of this
geometry and the other.
"""
return self._geomgen(capi.geom_intersection, other)
def sym_difference(self, other):
"""
Returns a new geometry which is the symmetric difference of this
geometry and the other.
"""
return self._geomgen(capi.geom_sym_diff, other)
def union(self, other):
"""
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
"""
return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
@property
def x(self):
"Returns the X coordinate for this Point."
return capi.getx(self.ptr, 0)
@property
def y(self):
"Returns the Y coordinate for this Point."
return capi.gety(self.ptr, 0)
@property
def z(self):
"Returns the Z coordinate for this Point."
if self.coord_dim == 3:
return capi.getz(self.ptr, 0)
@property
def tuple(self):
"Returns the tuple of this point."
if self.coord_dim == 2:
return (self.x, self.y)
elif self.coord_dim == 3:
return (self.x, self.y, self.z)
coords = tuple
class LineString(OGRGeometry):
def __getitem__(self, index):
"Returns the Point at the given index."
if index >= 0 and index < self.point_count:
x, y, z = c_double(), c_double(), c_double()
capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
dim = self.coord_dim
if dim == 1:
return (x.value,)
elif dim == 2:
return (x.value, y.value)
elif dim == 3:
return (x.value, y.value, z.value)
else:
raise OGRIndexError('index out of range: %s' % str(index))
def __iter__(self):
"Iterates over each point in the LineString."
for i in xrange(self.point_count):
yield self[i]
def __len__(self):
"The length returns the number of points in the LineString."
return self.point_count
@property
def tuple(self):
"Returns the tuple representation of this LineString."
return tuple([self[i] for i in xrange(len(self))])
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function.
"""
return [func(self.ptr, i) for i in xrange(len(self))]
@property
def x(self):
"Returns the X coordinates in a list."
return self._listarr(capi.getx)
@property
def y(self):
"Returns the Y coordinates in a list."
return self._listarr(capi.gety)
@property
def z(self):
"Returns the Z coordinates in a list."
if self.coord_dim == 3:
return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString): pass
class Polygon(OGRGeometry):
def __len__(self):
"The number of interior rings in this Polygon."
return self.geom_count
def __iter__(self):
"Iterates through each ring in the Polygon."
for i in xrange(self.geom_count):
yield self[i]
def __getitem__(self, index):
"Gets the ring at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
# Polygon Properties
@property
def shell(self):
"Returns the shell of this Polygon."
return self[0] # First ring is the shell
exterior_ring = shell
@property
def tuple(self):
"Returns a tuple of LinearRing coordinate tuples."
return tuple([self[i].tuple for i in xrange(self.geom_count)])
coords = tuple
@property
def point_count(self):
"The number of Points in this Polygon."
# Summing up the number of points in each ring of the Polygon.
return sum([self[i].point_count for i in xrange(self.geom_count)])
@property
def centroid(self):
"Returns the centroid (a Point) of this Polygon."
# The centroid is a Point, create a geometry for this.
p = OGRGeometry(OGRGeomType('Point'))
capi.get_centroid(self.ptr, p.ptr)
return p
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
"The Geometry Collection class."
def __getitem__(self, index):
"Gets the Geometry at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
def __iter__(self):
"Iterates over each Geometry."
for i in xrange(self.geom_count):
yield self[i]
def __len__(self):
"The number of geometries in this Geometry Collection."
return self.geom_count
def add(self, geom):
"Add the geometry to this Geometry Collection."
if isinstance(geom, OGRGeometry):
if isinstance(geom, self.__class__):
for g in geom: capi.add_geom(self.ptr, g.ptr)
else:
capi.add_geom(self.ptr, geom.ptr)
elif isinstance(geom, basestring):
tmp = OGRGeometry(geom)
capi.add_geom(self.ptr, tmp.ptr)
else:
raise OGRException('Must add an OGRGeometry.')
@property
def point_count(self):
"The number of Points in this Geometry Collection."
# Summing up the number of points in each geometry in this collection
return sum([self[i].point_count for i in xrange(self.geom_count)])
@property
def tuple(self):
"Returns a tuple representation of this Geometry Collection."
return tuple([self[i].tuple for i in xrange(self.geom_count)])
coords = tuple
# Multiple Geometry types.
class MultiPoint(GeometryCollection): pass
class MultiLineString(GeometryCollection): pass
class MultiPolygon(GeometryCollection): pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {1 : Point,
2 : LineString,
3 : Polygon,
4 : MultiPoint,
5 : MultiLineString,
6 : MultiPolygon,
7 : GeometryCollection,
101: LinearRing,
1 + OGRGeomType.wkb25bit : Point,
2 + OGRGeomType.wkb25bit : LineString,
3 + OGRGeomType.wkb25bit : Polygon,
4 + OGRGeomType.wkb25bit : MultiPoint,
5 + OGRGeomType.wkb25bit : MultiLineString,
6 + OGRGeomType.wkb25bit : MultiPolygon,
7 + OGRGeomType.wkb25bit : GeometryCollection,
}
| apache-2.0 |
slifty/audfprint | comp_file_lines.py | 5 | 1392 | #!/usr/bin/env python
# comp_file_lines.py
#
# Python script to count the number of exactly matching lines between two files (no edit distance).
# 2014-09-07 Dan Ellis dpwe@ee.columbia.edu
import sys
verbose = False
onefile = False
if len(sys.argv) == 2:
# Special case: if a single file, compare the first ws-separated field with remainder
onefile = True
print "onefile true"
elif len(sys.argv) < 3:
print "Usage:", sys.argv[0], "file1.txt file2.txt [verbose]"
sys.exit(1)
file1 = sys.argv[1]
if not onefile:
file2 = sys.argv[2]
if len(sys.argv) > 3:
verbose = True
# Read in the files
with open(file1) as f:
item1s = [val.rstrip("\n") for val in f]
if onefile:
# Set item2s to everything after first block of WS in each line.
item2s = [item.split(None, 1)[1] for item in item1s]
# Replace items1s with everything before first WS in each line.
item1s = [item.split(None, 1)[0] for item in item1s]
else:
with open(file2) as f:
item2s = [val.rstrip("\n") for val in f]
# Now, make a boolean vector of correctness
import numpy as np
correct = np.zeros(len(item1s), np.float)
for ix, items in enumerate(zip(item1s, item2s)):
if items[0] == items[1]:
correct[ix] = 1.0
else:
if verbose:
print items
print int(np.sum(correct)),"correct out of", len(correct), "= %.1f%%" % (100.0*np.mean(correct))
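# Example invocation (filenames and counts are illustrative):
#   ./comp_file_lines.py hypothesis.txt reference.txt
# prints something like "45 correct out of 50 = 90.0%".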
| mit |
trading-dev/trading-coin | contrib/linearize/linearize-hashes.py | 1 | 3038 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
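	# For example, build_request(0, 'getblockhash', [123]) returns
	#   {'version': '1.1', 'method': 'getblockhash', 'id': 0, 'params': [123]}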
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 21212
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
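# A sample CONFIG-FILE sketch (values are placeholders; the defaults above
# apply when a key is omitted):
#
#   host=127.0.0.1
#   port=21212
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000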
| mit |
synicalsyntax/zulip | zerver/lib/storage.py | 8 | 2688 | # Useful reading is:
# https://zulip.readthedocs.io/en/latest/subsystems/html-css.html#front-end-build-process
import os
from typing import Optional
from django.conf import settings
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
if settings.DEBUG:
from django.contrib.staticfiles.finders import find
def static_path(path: str) -> str:
return find(path) or "/nonexistent"
else:
def static_path(path: str) -> str:
return os.path.join(settings.STATIC_ROOT, path)
class IgnoreBundlesManifestStaticFilesStorage(ManifestStaticFilesStorage):
def hashed_name(self, name: str, content: Optional[str]=None, filename: Optional[str]=None) -> str:
ext = os.path.splitext(name)[1]
if name.startswith("webpack-bundles"):
# Hack to avoid renaming already-hashnamed webpack bundles
# when minifying; this was causing every bundle to have
# two hashes appended to its name, one by webpack and one
# here. We can't just skip processing of these bundles,
# since we do need the Django storage to add these to the
# manifest for django_webpack_loader to work. So, we just
# use a no-op hash function for these already-hashed
# assets.
return name
if ext in ['.png', '.gif', '.jpg', '.svg']:
# Similarly, don't hash-rename image files; we only serve
# the original file paths (not the hashed file paths), and
# so the only effect of hash-renaming these is to increase
            # the size of release tarballs with duplicate copies of these.
#
# One could imagine a future world in which we instead
# used the hashed paths for these; in that case, though,
# we should instead be removing the non-hashed paths.
return name
        if ext in ['.json', '.po', '.mo', '.mp3', '.ogg', '.html']:
# And same story for translation files, sound files, etc.
return name
return super().hashed_name(name, content, filename)
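    # For example (filenames are illustrative):
    #
    #     hashed_name('webpack-bundles/app.0123abcd.js')  # returned unchanged
    #     hashed_name('images/logo/zulip.png')            # returned unchanged
    #     hashed_name('styles/app.css')                   # hashed as usual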
class ZulipStorage(IgnoreBundlesManifestStaticFilesStorage):
# This is a hack to use staticfiles.json from within the
# deployment, rather than a directory under STATIC_ROOT. By doing
# so, we can use a different copy of staticfiles.json for each
# deployment, which ensures that we always use the correct static
# assets for each deployment.
manifest_name = os.path.join(settings.DEPLOY_ROOT, "staticfiles.json")
def path(self, name: str) -> str:
if name == self.manifest_name:
return name
return super().path(name)
| apache-2.0 |
datakid/tvet | tafe/migrations/0039_auto__add_field_subject_staff_member.py | 1 | 21951 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Subject.staff_member'
db.add_column('tafe_subject', 'staff_member',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tafe.Staff'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Subject.staff_member'
db.delete_column('tafe_subject', 'staff_member_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tafe.applicant': {
'Meta': {'object_name': 'Applicant'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'applied_for': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applicants'", 'to': "orm['tafe.Course']"}),
'date_of_application': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_offer_accepted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_offer_sent': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'disability': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'disability_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'education_level': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'eligibility': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'default': "'01'", 'max_length': "'2'", 'null': 'True', 'blank': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'applicant_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'applicant_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'ranking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'short_listed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'blank': 'True'}),
'successful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'test_ap': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'test_eng': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'test_ma': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'tafe.assessment': {
'Meta': {'object_name': 'Assessment'},
'date_due': ('django.db.models.fields.DateField', [], {}),
'date_given': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assessments'", 'to': "orm['tafe.Subject']"})
},
'tafe.course': {
'Meta': {'object_name': 'Course'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['tafe.Student']", 'null': 'True', 'through': "orm['tafe.Enrolment']", 'blank': 'True'}),
'subjects': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'courses'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['tafe.Subject']"}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'tafe.credential': {
'Meta': {'object_name': 'Credential'},
'aqf_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credential_last_change_by'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'credential_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'tafe.enrolment': {
'Meta': {'object_name': 'Enrolment'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enrolments'", 'to': "orm['tafe.Course']"}),
'date_ended': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 10, 17, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enrolment_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'mark': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enrolment_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enrolments'", 'to': "orm['tafe.Student']"}),
'withdrawn_reason': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'tafe.grade': {
'Meta': {'object_name': 'Grade'},
'date_started': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grade_last_change_by'", 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'grade_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'results': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'grades'", 'null': 'True', 'to': "orm['tafe.Result']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grades'", 'to': "orm['tafe.Student']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grades'", 'to': "orm['tafe.Subject']"})
},
'tafe.result': {
'Meta': {'object_name': 'Result'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tafe.Assessment']"}),
'date_submitted': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'result_last_change_by'", 'to': "orm['auth.User']"}),
'mark': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'result_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"})
},
'tafe.session': {
'Meta': {'object_name': 'Session'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_number': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['tafe.Student']", 'null': 'True', 'through': "orm['tafe.StudentAttendance']", 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['tafe.Subject']"}),
'timetable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['tafe.Timetable']"})
},
'tafe.staff': {
'Meta': {'object_name': 'Staff'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'credential': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'credentials'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['tafe.Credential']"}),
'disability': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'disability_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'default': "'01'", 'max_length': "'2'", 'null': 'True', 'blank': 'True'}),
'islpr_listening': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_overall': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_reading': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_speaking': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_writing': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'staff_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'staff_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'tafe.staffattendance': {
'Meta': {'object_name': 'StaffAttendance'},
'absent': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'staffattendance_last_change_by'", 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'staffattendance_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'staffattendance_attendance_records'", 'to': "orm['tafe.Session']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'staff_member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attendance_records'", 'to': "orm['tafe.Staff']"})
},
'tafe.student': {
'Meta': {'object_name': 'Student'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'application_details': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tafe.Applicant']"}),
'disability': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'disability_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'education_level': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'default': "'01'", 'max_length': "'2'", 'null': 'True', 'blank': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'student_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'student_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'tafe.studentattendance': {
'Meta': {'object_name': 'StudentAttendance'},
'absent': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studentattendance_last_change_by'", 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studentattendance_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studentattendance_attendance_records'", 'to': "orm['tafe.Session']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attendance_records'", 'to': "orm['tafe.Student']"})
},
'tafe.subject': {
'Meta': {'object_name': 'Subject'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'semester': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40'}),
'staff_member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tafe.Staff']", 'null': 'True', 'blank': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['tafe.Student']", 'null': 'True', 'through': "orm['tafe.Grade']", 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'tafe.timetable': {
'Meta': {'unique_together': "(('year', 'term'),)", 'object_name': 'Timetable'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '12'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'term': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['tafe'] | gpl-3.0 |
certik/pyjamas | examples/infohierarchy/InfoDirectory.py | 5 | 15139 | import pyjd
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.AbsolutePanel import AbsolutePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.Grid import Grid
from pyjamas.ui.TabPanel import TabPanel
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.FlexTable import FlexTable
from pyjamas.ui.Image import Image
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui import HasVerticalAlignment
from pyjamas.ui import HasAlignment
from pyjamas import Window
#from pyjamas.horizsplitpanel import HorizontalSplitPanel
from pyjamas.JSONService import JSONProxy
from Trees import Trees
from pyjamas.Timer import Timer
class CollapserPanel(SimplePanel):
def __init__(self, sink):
SimplePanel.__init__(self)
self.sink = sink
self.caption = HTML()
self.child = None
self.showing = False
self.dragging = False
self.dragStartX = 0
self.dragStartY = 0
self.panel = FlexTable()
self.collapse = Image("./images/cancel.png")
self.collapse.addClickListener(self)
dock = DockPanel()
dock.setSpacing(0)
dock.add(self.collapse, DockPanel.EAST)
dock.add(self.caption, DockPanel.WEST)
dock.setCellHorizontalAlignment(self.collapse, HasAlignment.ALIGN_RIGHT)
dock.setCellVerticalAlignment(self.collapse, HasAlignment.ALIGN_TOP)
dock.setCellHorizontalAlignment(self.caption, HasAlignment.ALIGN_LEFT)
dock.setCellWidth(self.caption, "100%")
dock.setWidth("100%")
dock.setHeight("100%")
self.panel.setWidget(0, 0, dock)
self.panel.setHeight("100%")
self.panel.setWidth("100%")
self.panel.setBorderWidth(0)
self.panel.setCellPadding(0)
self.panel.setCellSpacing(0)
self.panel.getCellFormatter().setHeight(1, 0, "100%")
self.panel.getCellFormatter().setWidth(1, 0, "100%")
self.panel.getCellFormatter().setAlignment(1, 0, HasHorizontalAlignment.ALIGN_LEFT, HasVerticalAlignment.ALIGN_TOP)
SimplePanel.setWidget(self, self.panel)
self.setStyleName("gwt-DialogBox")
self.caption.setStyleName("Caption")
self.collapse.setStyleName("Close")
dock.setStyleName("Header")
#self.caption.addMouseListener(self)
self.collapsed = False
self.collapsed_width = "15px"
self.uncollapsed_width = "100%"
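# Clicking the corner image (see onClick below) toggles between
# uncollapsed_width and the 15px collapsed strip, notifying the sink via
# setCollapserWidth() so the enclosing panel can re-balance its cells.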
def setInitialWidth(self, width):
self.uncollapsed_width = width
SimplePanel.setWidth(self, width)
self.sink.setCollapserWidth(self, width)
def setHeight(self, height):
SimplePanel.setHeight(self, height)
def onClick(self, sender):
if not self.collapsed:
self.collapse.setUrl("./tree_closed.gif")
self.collapsed = True
self.caption.setVisible(False)
if self.child:
self.child.setVisible(False)
self.setWidth(self.collapsed_width)
self.sink.setCollapserWidth(self, self.collapsed_width)
else:
self.collapse.setUrl("./images/cancel.png")
self.collapsed = False
self.caption.setVisible(True)
if self.child:
self.child.setVisible(True)
self.setWidth(self.uncollapsed_width)
self.sink.setCollapserWidth(self, self.uncollapsed_width)
def setHTML(self, html):
self.caption.setHTML(html)
def setText(self, text):
self.caption.setText(text)
def remove(self, widget):
if self.child != widget:
return False
self.panel.remove(widget)
return True
def doAttachChildren(self):
SimplePanel.doAttachChildren(self)
self.caption.onAttach()
def doDetachChildren(self):
SimplePanel.doDetachChildren(self)
self.caption.onDetach()
def setWidget(self, widget):
if self.child is not None:
self.panel.remove(self.child)
if widget is not None:
self.panel.setWidget(1, 0, widget)
self.child = widget
def space_split(data):
res = []
idx = data.find(" ")
res.append(data[:idx])
res.append(data[idx+1:])
return res
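# space_split() splits on the first space only, e.g. (illustrative):
# space_split("cellstyle background-color red")
# -> ["cellstyle", "background-color red"]
# Input without any space hits find() == -1 and yields an odd result
# (["fo", "foo"] for "foo"), so callers must pass "key value" pairs.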
class RightGrid(DockPanel):
def __init__(self, title):
DockPanel.__init__(self)
self.grid = FlexTable()
title = HTML(title)
self.add(title, DockPanel.NORTH)
self.setCellHorizontalAlignment(title,
HasHorizontalAlignment.ALIGN_LEFT)
self.add(self.grid, DockPanel.CENTER)
self.grid.setBorderWidth("0px")
self.grid.setCellSpacing("0px")
self.grid.setCellPadding("4px")
self.formatCell(0, 0)
self.grid.setHTML(0, 0, " ")
def clear_items(self):
self.index = 0
self.items = {}
def set_items(self, items):
self.items = items
self.index = 0
self.max_rows = 0
self.max_cols = 0
Timer(1, self)
def onTimer(self, t):
count = 0
while count < 10 and self.index < len(self.items):
self._add_items(self.index)
self.index += 1
count += 1
if self.index < len(self.items):
Timer(1, self)
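# set_items() seeds a 1ms Timer; each onTimer tick renders at most ten
# items via _add_items() below, so the browser stays responsive while a
# large grid is built up incrementally.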
def _add_items(self, i):
item = self.items[i]
command = item[0]
col = item[1]
row = item[2]
data = item[3]
format_row = -1
format_col = -1
if col+1 > self.max_cols:
format_col = self.max_cols
#self.grid.resizeColumns(col+1)
self.max_cols = col+1
if row+1 >= self.max_rows:
format_row = self.max_rows
#self.grid.resizeRows(row+1)
self.max_rows = row+1
if format_row >= 0:
for k in range(format_row, self.max_rows):
self.formatCell(k, 0)
self.formatCell(row, col)
cf = self.grid.getCellFormatter()
if command == 'data':
self.grid.setHTML(row, col, data)
elif command == 'cellstyle':
data = space_split(data)
attr = data[0]
val = data[1]
cf.setStyleAttr(row, col, attr, val)
elif command == 'align':
data = space_split(data)
vert = data[0]
horiz = data[1]
if vert != '-':
cf.setVerticalAlignment(row, col, vert)
if horiz != '-':
cf.setHorizontalAlignment(row, col, horiz)
elif command == 'cellspan':
data = space_split(data)
rowspan = data[0]
colspan = data[1]
if colspan != '-':
cf.setColSpan(row, col, colspan)
if rowspan != '-':
cf.setRowSpan(row, col, rowspan)
def formatCell(self, row, col):
self.grid.prepareCell(row, col)
if col == 0 and row != 0:
self.grid.setHTML(row, col, "%d" % row)
if row != 0 and col != 0:
#self.grid.setHTML(row, col, " ")
fmt = "rightpanel-cellformat"
if col == 0 and row == 0:
fmt = "rightpanel-cellcornerformat"
elif row == 0:
fmt = "rightpanel-celltitleformat"
elif col == 0:
fmt = "rightpanel-cellleftformat"
self.grid.getCellFormatter().setStyleName(row, col, fmt)
class RightPanel(DockPanel):
def __init__(self):
DockPanel.__init__(self)
self.grids = {}
self.g = Grid()
self.g.setCellSpacing("0px")
self.g.setCellPadding("8px")
self.title = HTML(" ")
self.title.setStyleName("rightpanel-title")
self.add(self.title, DockPanel.NORTH)
self.setCellWidth(self.title, "100%")
self.setCellHorizontalAlignment(self.title,
HasHorizontalAlignment.ALIGN_LEFT)
self.add(self.g, DockPanel.CENTER)
def setTitle(self, title):
self.title.setHTML(title)
def clear_items(self):
for i in range(len(self.grids)):
g = self.grids[i]
if hasattr(g, "clear_items"):
g.clear_items()
self.grids = {}
self.g.resize(0, 0)
def setup_panels(self, datasets):
self.grids = {}
self.data = {}
self.names = {}
self.loaded = {}
size = len(datasets)
self.g.resize(size, 1)
#for i in range(size):
# item = datasets[i]
# fname = item[0]
# self.grids[i] = RightGrid(fname)
# self.g.setWidget(i, 0, self.grids[i])
def add_html(self, html, name, index):
self.data[index] = html
self.names[index] = name
self.grids[index] = HTML(html)
self.g.setWidget(index, 0, self.grids[index])
def add_items(self, items, name, index):
self.data[index] = items
self.names[index] = name
self.grids[index] = RightGrid("")
self.grids[index].set_items(items)
self.g.setWidget(index, 0, self.grids[index])
class MidPanel(Grid):
def __init__(self, sink):
Grid.__init__(self)
self.resize(1, 1)
self.addTableListener(self)
self.sink = sink
self.selected_row = -1
def set_items(self, items):
if self.selected_row != -1:
self.styleRow(self.selected_row, False)
self.item_names = []
self.item_locations = []
self.resizeRows(len(items))
for i in range(len(items)):
item = items[i]
name = item[0]
location = item[1]
self.setHTML(i, 0, name)
self.item_names.append(name)
self.item_locations.append(location)
def onCellClicked(self, sender, row, col):
self.styleRow(self.selected_row, False)
self.selected_row = row
self.styleRow(self.selected_row, True)
self.sink.select_right_grid(self.item_locations[row],
self.item_names[row])
def styleRow(self, row, selected):
if (row != -1):
if (selected):
self.getRowFormatter().addStyleName(row, "midpanel-SelectedRow")
else:
self.getRowFormatter().removeStyleName(row, "midpanel-SelectedRow")
class InfoDirectory:
def onModuleLoad(self):
self.remote = InfoServicePython()
self.tree_width = 200
self.tp = HorizontalPanel()
self.tp.setWidth("%dpx" % (self.tree_width))
self.treeview = Trees()
self.treeview.fTree.addTreeListener(self)
self.sp = ScrollPanel()
self.tp.add(self.treeview)
self.sp.add(self.tp)
self.sp.setHeight("100%")
self.horzpanel1 = HorizontalPanel()
self.horzpanel1.setSize("100%", "100%")
self.horzpanel1.setBorderWidth(1)
self.horzpanel1.setSpacing("10px")
self.rp = RightPanel()
self.rps = ScrollPanel()
self.rps.add(self.rp)
self.rps.setWidth("100%")
self.rp.setWidth("100%")
self.cp1 = CollapserPanel(self)
self.cp1.setWidget(self.sp)
self.cp1.setHTML(" ")
self.midpanel = MidPanel(self)
self.cp2 = CollapserPanel(self)
self.cp2.setWidget(self.midpanel)
self.cp2.setHTML(" ")
self.horzpanel1.add(self.cp1)
self.horzpanel1.add(self.cp2)
self.horzpanel1.add(self.rps)
self.cp1.setInitialWidth("%dpx" % self.tree_width)
self.cp2.setInitialWidth("200px")
RootPanel().add(self.horzpanel1)
width = Window.getClientWidth()
height = Window.getClientHeight()
self.onWindowResized(width, height)
Window.addWindowResizeListener(self)
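# Layout: tree (cp1) | mid panel (cp2) | right panel (rps) sit side by side
# in one HorizontalPanel; onWindowResized() below keeps all three columns
# sized to the browser window.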
def setCollapserWidth(self, widget, width):
self.horzpanel1.setCellWidth(widget, width)
def onWindowResized(self, width, height):
#self.hp.setWidth("%dpx" % (width - self.tree_width))
#self.hp.setHeight("%dpx" % (height - 20))
self.cp1.setHeight("%dpx" % (height - 30))
self.cp2.setHeight("%dpx" % (height - 30))
self.rps.setHeight("%dpx" % (height - 30))
self.horzpanel1.setHeight("%dpx" % (height - 20))
def onTreeItemStateChanged(self, item):
if item.isSelected():
self.onTreeItemSelected(item)
def onTreeItemSelected(self, item):
obj = item.getUserObject()
if len(obj.children) != 0:
self.clear_mid_panel()
return
self.remote.get_midpanel_data(obj.root + "/" + obj.text, self)
self.cp2.setHTML(obj.text)
self.clear_right_panel()
def clear_right_panel(self):
self.horzpanel1.remove(2)
self.horzpanel1.insert(HTML(""), 2)
self.rp.setTitle(" ")
def clear_mid_panel(self):
self.clear_right_panel()
#self.horzpanel2.setLeftWidget(HTML(""))
def set_mid_panel(self, response):
self.midpanel.set_items(response)
self.cp2.setWidget(self.midpanel)
def select_right_grid(self, location, name):
self.horzpanel1.remove(2)
self.horzpanel1.insert(self.rps, 2)
self.rp.setTitle(name)
self.remote.get_rightpanel_datanames(location, self)
def get_rightpanel_datasets(self, datasets):
self.rp.clear_items()
self.rp.setup_panels(datasets)
for i in range(len(datasets)):
item = datasets[i]
fname = item[0]
self.remote.get_rightpanel_data(fname, fname, i, self)
def fill_right_grid(self, data):
index = data.get('index')
name = data.get('name')
if data.has_key('items'):
self.rp.add_items(data.get('items'), name, index)
elif data.has_key('html'):
self.rp.add_html(data.get('html'), name, index)
def onRemoteResponse(self, response, request_info):
method = request_info.method
if method == "get_midpanel_data":
self.set_mid_panel(response)
elif method == "get_rightpanel_datanames":
self.get_rightpanel_datasets(response)
elif method == "get_rightpanel_data":
self.fill_right_grid(response)
def onRemoteError(self, code, message, request_info):
RootPanel().add(HTML("Server Error or Invalid Response: ERROR " + code))
RootPanel().add(HTML(message))
class InfoServicePython(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "/infoservice/EchoService.py",
["get_midpanel_data",
"get_rightpanel_datanames",
"get_rightpanel_data"])
if __name__ == '__main__':
pyjd.setup("http://127.0.0.1/examples/infohierarchy/public/InfoDirectory.html")
app = InfoDirectory()
app.onModuleLoad()
pyjd.run()
| apache-2.0 |
Belgabor/django | django/contrib/databrowse/plugins/calendars.py | 247 | 4317 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.text import capfirst
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.views.generic import date_based
from django.utils import datetime_safe
class CalendarPlugin(DatabrowsePlugin):
def __init__(self, field_names=None):
self.field_names = field_names
def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set, it takes
that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)])
else:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and f.name in self.field_names])
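# For example (illustrative), a model with pub_date = DateField() and
# title = CharField() yields {'pub_date': <DateField>}: only date-based
# fields qualify for calendar views.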
def model_index_html(self, request, model, site):
fields = self.field_dict(model)
if not fields:
return u''
return mark_safe(u'<p class="filter"><strong>View calendar by:</strong> %s</p>' % \
u', '.join(['<a href="calendars/%s/">%s</a>' % (f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values()]))
def urls(self, plugin_name, easy_instance_field):
if isinstance(easy_instance_field.field, models.DateField):
d = easy_instance_field.raw_value
return [mark_safe(u'%s%s/%s/%s/%s/%s/' % (
easy_instance_field.model.url(),
plugin_name, easy_instance_field.field.name,
str(d.year),
datetime_safe.new_date(d).strftime('%b').lower(),
d.day))]
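# datetime_safe.new_date() is used so strftime('%b') also works for dates
# before 1900, which the standard datetime module historically rejected.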
def model_view(self, request, model_databrowse, url):
self.model, self.site = model_databrowse.model, model_databrowse.site
self.fields = self.field_dict(self.model)
# If the model has no DateFields, there's no point in going further.
if not self.fields:
raise http.Http404('The requested model has no calendars.')
if url is None:
return self.homepage_view(request)
url_bits = url.split('/')
if self.fields.has_key(url_bits[0]):
return self.calendar_view(request, self.fields[url_bits[0]], *url_bits[1:])
raise http.Http404('The requested page does not exist.')
def homepage_view(self, request):
easy_model = EasyModel(self.site, self.model)
field_list = self.fields.values()
field_list.sort(key=lambda k:k.verbose_name)
return render_to_response('databrowse/calendar_homepage.html', {'root_url': self.site.root_url, 'model': easy_model, 'field_list': field_list})
def calendar_view(self, request, field, year=None, month=None, day=None):
easy_model = EasyModel(self.site, self.model)
queryset = easy_model.get_query_set()
extra_context = {'root_url': self.site.root_url, 'model': easy_model, 'field': field}
if day is not None:
return date_based.archive_day(request, year, month, day, queryset, field.name,
template_name='databrowse/calendar_day.html', allow_empty=False, allow_future=True,
extra_context=extra_context)
elif month is not None:
return date_based.archive_month(request, year, month, queryset, field.name,
template_name='databrowse/calendar_month.html', allow_empty=False, allow_future=True,
extra_context=extra_context)
elif year is not None:
return date_based.archive_year(request, year, queryset, field.name,
template_name='databrowse/calendar_year.html', allow_empty=False, allow_future=True,
extra_context=extra_context)
else:
return date_based.archive_index(request, queryset, field.name,
template_name='databrowse/calendar_main.html', allow_empty=True, allow_future=True,
extra_context=extra_context)
assert False, ('%s, %s, %s, %s' % (field, year, month, day))
| bsd-3-clause |
achesnais/open-concordance | corpus.py | 1 | 6801 | #!/usr/bin/python3
"""
Open Concordance by Antoine Chesnais. A simple corpus processing tool.
Copyright (C) 2014 Antoine Chesnais
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os, time, threading # Time is used for testing purposes.
class Corpus:
def __init__(self, path=None):
self.texts = {}
if path and os.path.exists(path):
if os.path.isdir(path):
self.load_folder(path)
else:
self.load_text_file(path)
def load_text_file(self, path):
"""Loads a single file into corpus"""
if not os.path.exists(path):
raise FileNotFoundError(path) # more precise than a bare IOError; still an OSError subclass
with open(path) as f:
self.texts[path] = tokenize(f.read())
def load_folder(self, path):
"""Loads a folder into corpus."""
if not os.path.exists(path):
raise FileNotFoundError(path)
filelist = os.listdir(path)
pathlist = []
for f in filelist:
pathlist.append(path+'/'+f)
for p in pathlist:
with open(p) as f:
self.texts[p] = tokenize(f.read())
def get_token_size(self):
""" Returns the total tokens in corpus."""
size = 0
for t in self.texts:
size += len(self.texts[t])
return size
def concordance(self, word, left=10, right=10, case=False, output='std'):
"""Aims to be your typical concordancer with KWIC.
TODO: left_text and right_text could be made into lists to allow for easier
KWIC sorting.
Also, max_right and max_left variables could be added to permit to ignore
punctuation in span count, though apparently span count is done in characters,
not words or tokens (as seen in AntConc)
Obviously, implement search of multiple groups, and when that's functional,
regular expression search."""
if case:
match_func = lambda w, t: w == t
else:
word = word.lower()
match_func = lambda w, t: w == t.lower() # word is already lowered, see line above
results = []
hits = 0
for t in sorted(self.texts):
for index, token in enumerate(self.texts[t]):
if match_func(word, token):
left_text = ""
right_text = ""
for i in range(max(0, index-left), index, 1):
if self.texts[t][i].isalnum():
left_text += ' ' + self.texts[t][i]
elif self.texts[t][i] in ('.', '?', '!', ',', ';', ':', ')'):
left_text += self.texts[t][i]
else:
left_text += ' ' + self.texts[t][i]
for i in range(index + 1, min(index+right+1, len(self.texts[t])), 1):
if self.texts[t][i].isalnum():
right_text += ' ' + self.texts[t][i]
elif self.texts[t][i] in ('.', '?', '!', ',', ';', ':', ')'):
right_text += self.texts[t][i]
else:
right_text += " " + self.texts[t][i]
results.append((token, left_text.strip(), right_text.strip(), t))
hits += 1
if output == 'txt':
l_width = r_width = p_width = 0
w_width = len(word)
for r in results:
l_width = max(l_width, len(r[1]))
r_width = max(r_width, len(r[2]))
p_width = max(p_width, len(r[3]))
with open('{}_concordance_results.txt'.format(word), 'w') as f:
f.write("There are {} hits.\n\n".format(hits))
for r in results:
f.write("{0:>{lw}} <{1:^{ww}}> {2:<{rw}} in file {3:<{pw}}.\n\n".format(r[1], r[0], r[2], r[3], lw=l_width, rw=r_width, ww=w_width, pw=p_width))
else:
return hits, results
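# Typical use (illustrative):
# c = Corpus('txts')
# hits, rows = c.concordance('rabbit', left=5, right=5)
# c.concordance('rabbit', output='txt') # writes rabbit_concordance_results.txt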
def quick_concordance(self, word, case=False):
"""Simple concordance: takes a word and returns how many times it occurs"""
if case:
match_func = lambda w, t: w == t
else:
match_func = lambda w, t: w == t.lower() # word is already lowered, see two lines below
hits = 0
word = word.lower()
for t in self.texts:
for token in self.texts[t]:
if match_func(word, token):
hits += 1
return hits
def word_list(self, case=False, punc=False, num=False):
"""TODO
Return a list of the words (or, rather, the tokens) in the corpus,
along with their raw and relative occurrences."""
for t in self.texts:
pass
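# A minimal sketch of what word_list() could do (an assumption, not the
# original implementation): count token frequencies with
# collections.Counter and report raw and relative counts.
#
# from collections import Counter
# counts = Counter()
# for t in self.texts:
# counts.update(self.texts[t])
# total = self.get_token_size()
# return [(tok, n, n / total) for tok, n in counts.most_common()]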
def get_data(self): # for now avoid using in GUI mode.
"""TODO : gui mode
Computes the lengths of the smallest and largest text, and
average text length."""
min_length = None
max_length = None
length_sum = 0
for t in self.texts:
length = len(self.texts[t])
if min_length:
min_length = min(length, min_length)
else:
min_length = length
if max_length:
max_length = max(length, max_length)
else:
max_length = length
length_sum += length
return min_length, max_length, length_sum
def tokenize(string):
"""A simple tokenizer
A token is any sequence of alphanumeric characters, or a sign of punctuation"""
tokens = []
word = ''
for char in string:
if char.isalnum():
word += char
elif char not in (' ', '\n'):
if word:
tokens.append(word)
word = ''
tokens.append(char)
else:
if word:
tokens.append(word)
word = ''
if word: # flush a trailing word when the input does not end in a delimiter
tokens.append(word)
return tokens
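# A quick illustration of the tokenizer's behaviour (example values, not
# part of the original module):
# tokenize("Hello, world!") -> ['Hello', ',', 'world', '!']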
def main():
c = Corpus('txts')
c.get_data()
if __name__ == '__main__':
main()
| gpl-3.0 |
WorksApplications/cassandra | pylib/setup.py | 78 | 1154 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from distutils.core import setup
def get_extensions():
if "--no-compile" in sys.argv:
return []
from Cython.Build import cythonize
return cythonize("cqlshlib/copyutil.py")
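# Passing --no-compile (e.g. "python setup.py install --no-compile") skips
# the Cython build of cqlshlib/copyutil.py, so Cython does not need to be
# installed.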
setup(
name="cassandra-pylib",
description="Cassandra Python Libraries",
packages=["cqlshlib"],
ext_modules=get_extensions(),
)
| apache-2.0 |
xtmhm2000/scrapy-0.22 | scrapy/tests/test_contrib_loader.py | 24 | 22570 | import unittest
from functools import partial
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Join, Identity, TakeFirst, \
Compose, MapCompose
from scrapy.item import Item, Field
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
# test items
class NameItem(Item):
name = Field()
class TestItem(NameItem):
url = Field()
summary = Field()
# test item loaders
class NameItemLoader(ItemLoader):
default_item_class = TestItem
class TestItemLoader(NameItemLoader):
name_in = MapCompose(lambda v: v.title())
class DefaultedItemLoader(NameItemLoader):
default_input_processor = MapCompose(lambda v: v[:-1])
# test processors
def processor_with_args(value, other=None, loader_context=None):
if 'key' in loader_context:
return loader_context['key']
return value
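# With MapCompose(processor_with_args, key=u'val'), any collected value is
# replaced by u'val' pulled from the loader context; the loader-context
# tests below rely on exactly this behaviour.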
class BasicItemLoaderTest(unittest.TestCase):
def test_load_item_using_default_loader(self):
i = TestItem()
i['summary'] = u'lala'
il = ItemLoader(item=i)
il.add_value('name', u'marta')
item = il.load_item()
assert item is i
self.assertEqual(item['summary'], u'lala')
self.assertEqual(item['name'], [u'marta'])
def test_load_item_using_custom_loader(self):
il = TestItemLoader()
il.add_value('name', u'marta')
item = il.load_item()
self.assertEqual(item['name'], [u'Marta'])
def test_load_item_ignore_none_field_values(self):
def validate_sku(value):
# Let's assume a SKU is only digits.
if value.isdigit():
return value
class MyLoader(ItemLoader):
name_out = Compose(lambda vs: vs[0]) # take first which allows empty values
price_out = Compose(TakeFirst(), float)
sku_out = Compose(TakeFirst(), validate_sku)
valid_fragment = u'SKU: 1234'
invalid_fragment = u'SKU: not available'
sku_re = 'SKU: (.+)'
il = MyLoader(item={})
# Should not return "sku: None".
il.add_value('sku', [invalid_fragment], re=sku_re)
# Should not ignore empty values.
il.add_value('name', u'')
il.add_value('price', [u'0'])
self.assertEqual(il.load_item(), {
'name': u'',
'price': 0.0,
})
il.replace_value('sku', [valid_fragment], re=sku_re)
self.assertEqual(il.load_item()['sku'], u'1234')
def test_add_value(self):
il = TestItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_collected_values('name'), [u'Marta'])
self.assertEqual(il.get_output_value('name'), [u'Marta'])
il.add_value('name', u'pepe')
self.assertEqual(il.get_collected_values('name'), [u'Marta', u'Pepe'])
self.assertEqual(il.get_output_value('name'), [u'Marta', u'Pepe'])
# test add object value
il.add_value('summary', {'key': 1})
self.assertEqual(il.get_collected_values('summary'), [{'key': 1}])
il.add_value(None, u'Jim', lambda x: {'name': x})
self.assertEqual(il.get_collected_values('name'), [u'Marta', u'Pepe', u'Jim'])
def test_add_zero(self):
il = NameItemLoader()
il.add_value('name', 0)
self.assertEqual(il.get_collected_values('name'), [0])
def test_replace_value(self):
il = TestItemLoader()
il.replace_value('name', u'marta')
self.assertEqual(il.get_collected_values('name'), [u'Marta'])
self.assertEqual(il.get_output_value('name'), [u'Marta'])
il.replace_value('name', u'pepe')
self.assertEqual(il.get_collected_values('name'), [u'Pepe'])
self.assertEqual(il.get_output_value('name'), [u'Pepe'])
il.replace_value(None, u'Jim', lambda x: {'name': x})
self.assertEqual(il.get_collected_values('name'), [u'Jim'])
def test_get_value(self):
il = NameItemLoader()
self.assertEqual(u'FOO', il.get_value([u'foo', u'bar'], TakeFirst(), unicode.upper))
self.assertEqual([u'foo', u'bar'], il.get_value([u'name:foo', u'name:bar'], re=u'name:(.*)$'))
self.assertEqual(u'foo', il.get_value([u'name:foo', u'name:bar'], TakeFirst(), re=u'name:(.*)$'))
il.add_value('name', [u'name:foo', u'name:bar'], TakeFirst(), re=u'name:(.*)$')
self.assertEqual([u'foo'], il.get_collected_values('name'))
il.replace_value('name', u'name:bar', re=u'name:(.*)$')
self.assertEqual([u'bar'], il.get_collected_values('name'))
def test_iter_on_input_processor_input(self):
class NameFirstItemLoader(NameItemLoader):
name_in = TakeFirst()
il = NameFirstItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_collected_values('name'), [u'marta'])
il = NameFirstItemLoader()
il.add_value('name', [u'marta', u'jose'])
self.assertEqual(il.get_collected_values('name'), [u'marta'])
il = NameFirstItemLoader()
il.replace_value('name', u'marta')
self.assertEqual(il.get_collected_values('name'), [u'marta'])
il = NameFirstItemLoader()
il.replace_value('name', [u'marta', u'jose'])
self.assertEqual(il.get_collected_values('name'), [u'marta'])
il = NameFirstItemLoader()
il.add_value('name', u'marta')
il.add_value('name', [u'jose', u'pedro'])
self.assertEqual(il.get_collected_values('name'), [u'marta', u'jose'])
def test_map_compose_filter(self):
def filter_world(x):
return None if x == 'world' else x
proc = MapCompose(filter_world, str.upper)
self.assertEqual(proc(['hello', 'world', 'this', 'is', 'scrapy']),
['HELLO', 'THIS', 'IS', 'SCRAPY'])
def test_map_compose_filter_multiple(self):
class TestItemLoader(NameItemLoader):
name_in = MapCompose(lambda v: v.title(), lambda v: v[:-1])
il = TestItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'Mart'])
item = il.load_item()
self.assertEqual(item['name'], [u'Mart'])
def test_default_input_processor(self):
il = DefaultedItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'mart'])
def test_inherited_default_input_processor(self):
class InheritDefaultedItemLoader(DefaultedItemLoader):
pass
il = InheritDefaultedItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'mart'])
def test_input_processor_inheritance(self):
class ChildItemLoader(TestItemLoader):
url_in = MapCompose(lambda v: v.lower())
il = ChildItemLoader()
il.add_value('url', u'HTTP://scrapy.ORG')
self.assertEqual(il.get_output_value('url'), [u'http://scrapy.org'])
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'Marta'])
class ChildChildItemLoader(ChildItemLoader):
url_in = MapCompose(lambda v: v.upper())
summary_in = MapCompose(lambda v: v)
il = ChildChildItemLoader()
il.add_value('url', u'http://scrapy.org')
self.assertEqual(il.get_output_value('url'), [u'HTTP://SCRAPY.ORG'])
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'Marta'])
def test_empty_map_compose(self):
class IdentityDefaultedItemLoader(DefaultedItemLoader):
name_in = MapCompose()
il = IdentityDefaultedItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'marta'])
def test_identity_input_processor(self):
class IdentityDefaultedItemLoader(DefaultedItemLoader):
name_in = Identity()
il = IdentityDefaultedItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'marta'])
def test_extend_custom_input_processors(self):
class ChildItemLoader(TestItemLoader):
name_in = MapCompose(TestItemLoader.name_in, unicode.swapcase)
il = ChildItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'mARTA'])
def test_extend_default_input_processors(self):
class ChildDefaultedItemLoader(DefaultedItemLoader):
name_in = MapCompose(DefaultedItemLoader.default_input_processor, unicode.swapcase)
il = ChildDefaultedItemLoader()
il.add_value('name', u'marta')
self.assertEqual(il.get_output_value('name'), [u'MART'])
def test_output_processor_using_function(self):
il = TestItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), [u'Mar', u'Ta'])
class TakeFirstItemLoader(TestItemLoader):
name_out = u" ".join
il = TakeFirstItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), u'Mar Ta')
def test_output_processor_error(self):
class TestItemLoader(ItemLoader):
default_item_class = TestItem
name_out = MapCompose(float)
il = TestItemLoader()
il.add_value('name', [u'$10'])
try:
float('$10')
except Exception as e:
expected_exc_str = str(e)
exc = None
try:
il.load_item()
except Exception as e:
exc = e
assert isinstance(exc, ValueError)
s = str(exc)
assert 'name' in s, s
assert '$10' in s, s
assert 'ValueError' in s, s
assert expected_exc_str in s, s
def test_output_processor_using_classes(self):
il = TestItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), [u'Mar', u'Ta'])
class TakeFirstItemLoader(TestItemLoader):
name_out = Join()
il = TakeFirstItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), u'Mar Ta')
class TakeFirstItemLoader(TestItemLoader):
name_out = Join("<br>")
il = TakeFirstItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), u'Mar<br>Ta')
def test_default_output_processor(self):
il = TestItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), [u'Mar', u'Ta'])
class LalaItemLoader(TestItemLoader):
default_output_processor = Identity()
il = LalaItemLoader()
il.add_value('name', [u'mar', u'ta'])
self.assertEqual(il.get_output_value('name'), [u'Mar', u'Ta'])
def test_loader_context_on_declaration(self):
class ChildItemLoader(TestItemLoader):
url_in = MapCompose(processor_with_args, key=u'val')
il = ChildItemLoader()
il.add_value('url', u'text')
self.assertEqual(il.get_output_value('url'), ['val'])
il.replace_value('url', u'text2')
self.assertEqual(il.get_output_value('url'), ['val'])
def test_loader_context_on_instantiation(self):
class ChildItemLoader(TestItemLoader):
url_in = MapCompose(processor_with_args)
il = ChildItemLoader(key=u'val')
il.add_value('url', u'text')
self.assertEqual(il.get_output_value('url'), ['val'])
il.replace_value('url', u'text2')
self.assertEqual(il.get_output_value('url'), ['val'])
def test_loader_context_on_assign(self):
class ChildItemLoader(TestItemLoader):
url_in = MapCompose(processor_with_args)
il = ChildItemLoader()
il.context['key'] = u'val'
il.add_value('url', u'text')
self.assertEqual(il.get_output_value('url'), ['val'])
il.replace_value('url', u'text2')
self.assertEqual(il.get_output_value('url'), ['val'])
def test_item_passed_to_input_processor_functions(self):
def processor(value, loader_context):
return loader_context['item']['name']
class ChildItemLoader(TestItemLoader):
url_in = MapCompose(processor)
it = TestItem(name='marta')
il = ChildItemLoader(item=it)
il.add_value('url', u'text')
self.assertEqual(il.get_output_value('url'), ['marta'])
il.replace_value('url', u'text2')
self.assertEqual(il.get_output_value('url'), ['marta'])
def test_add_value_on_unknown_field(self):
il = TestItemLoader()
self.assertRaises(KeyError, il.add_value, 'wrong_field', [u'lala', u'lolo'])
def test_compose_processor(self):
class TestItemLoader(NameItemLoader):
name_out = Compose(lambda v: v[0], lambda v: v.title(), lambda v: v[:-1])
il = TestItemLoader()
il.add_value('name', [u'marta', u'other'])
self.assertEqual(il.get_output_value('name'), u'Mart')
item = il.load_item()
self.assertEqual(item['name'], u'Mart')
def test_partial_processor(self):
def join(values, sep=None, loader_context=None, ignored=None):
if sep is not None:
return sep.join(values)
elif loader_context and 'sep' in loader_context:
return loader_context['sep'].join(values)
else:
return ''.join(values)
class TestItemLoader(NameItemLoader):
name_out = Compose(partial(join, sep='+'))
url_out = Compose(partial(join, loader_context={'sep': '.'}))
summary_out = Compose(partial(join, ignored='foo'))
il = TestItemLoader()
il.add_value('name', [u'rabbit', u'hole'])
il.add_value('url', [u'rabbit', u'hole'])
il.add_value('summary', [u'rabbit', u'hole'])
item = il.load_item()
self.assertEqual(item['name'], u'rabbit+hole')
self.assertEqual(item['url'], u'rabbit.hole')
self.assertEqual(item['summary'], u'rabbithole')
class ProcessorsTest(unittest.TestCase):
def test_take_first(self):
proc = TakeFirst()
self.assertEqual(proc([None, '', 'hello', 'world']), 'hello')
self.assertEqual(proc([None, '', 0, 'hello', 'world']), 0)
def test_identity(self):
proc = Identity()
self.assertEqual(proc([None, '', 'hello', 'world']),
[None, '', 'hello', 'world'])
def test_join(self):
proc = Join()
self.assertRaises(TypeError, proc, [None, '', 'hello', 'world'])
self.assertEqual(proc(['', 'hello', 'world']), u' hello world')
self.assertEqual(proc(['hello', 'world']), u'hello world')
self.assert_(isinstance(proc(['hello', 'world']), unicode))
def test_compose(self):
proc = Compose(lambda v: v[0], str.upper)
self.assertEqual(proc(['hello', 'world']), 'HELLO')
proc = Compose(str.upper)
self.assertEqual(proc(None), None)
proc = Compose(str.upper, stop_on_none=False)
self.assertRaises(TypeError, proc, None)
def test_mapcompose(self):
filter_world = lambda x: None if x == 'world' else x
proc = MapCompose(filter_world, unicode.upper)
self.assertEqual(proc([u'hello', u'world', u'this', u'is', u'scrapy']),
[u'HELLO', u'THIS', u'IS', u'SCRAPY'])
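# MapCompose applies each function to every value in turn and silently
# drops None results, which is what makes the filter_world examples above
# work.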
class SelectorItemLoaderTest(unittest.TestCase):
response = HtmlResponse(url="", body="""
<html>
<body>
<div id="id">marta</div>
<p>paragraph</p>
<a href="http://www.scrapy.org">homepage</a>
<img src="/images/logo.png" width="244" height="65" alt="Scrapy">
</body>
</html>
""")
def test_constructor(self):
l = TestItemLoader()
self.assertEqual(l.selector, None)
def test_constructor_errors(self):
l = TestItemLoader()
self.assertRaises(RuntimeError, l.add_xpath, 'url', '//a/@href')
self.assertRaises(RuntimeError, l.replace_xpath, 'url', '//a/@href')
self.assertRaises(RuntimeError, l.get_xpath, '//a/@href')
self.assertRaises(RuntimeError, l.add_css, 'name', '#name::text')
self.assertRaises(RuntimeError, l.replace_css, 'name', '#name::text')
self.assertRaises(RuntimeError, l.get_css, '#name::text')
def test_constructor_with_selector(self):
sel = Selector(text=u"<html><body><div>marta</div></body></html>")
l = TestItemLoader(selector=sel)
self.assert_(l.selector is sel)
l.add_xpath('name', '//div/text()')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
def test_constructor_with_selector_css(self):
sel = Selector(text=u"<html><body><div>marta</div></body></html>")
l = TestItemLoader(selector=sel)
self.assert_(l.selector is sel)
l.add_css('name', 'div::text')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
def test_constructor_with_response(self):
l = TestItemLoader(response=self.response)
self.assert_(l.selector)
l.add_xpath('name', '//div/text()')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
def test_constructor_with_response_css(self):
l = TestItemLoader(response=self.response)
self.assert_(l.selector)
l.add_css('name', 'div::text')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
l.add_css('url', 'a::attr(href)')
self.assertEqual(l.get_output_value('url'), [u'http://www.scrapy.org'])
# combining/accumulating CSS selectors and XPath expressions
l.add_xpath('name', '//div/text()')
self.assertEqual(l.get_output_value('name'), [u'Marta', u'Marta'])
l.add_xpath('url', '//img/@src')
self.assertEqual(l.get_output_value('url'), [u'http://www.scrapy.org', u'/images/logo.png'])
def test_add_xpath_re(self):
l = TestItemLoader(response=self.response)
l.add_xpath('name', '//div/text()', re='ma')
self.assertEqual(l.get_output_value('name'), [u'Ma'])
def test_replace_xpath(self):
l = TestItemLoader(response=self.response)
self.assert_(l.selector)
l.add_xpath('name', '//div/text()')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
l.replace_xpath('name', '//p/text()')
self.assertEqual(l.get_output_value('name'), [u'Paragraph'])
l.replace_xpath('name', ['//p/text()', '//div/text()'])
self.assertEqual(l.get_output_value('name'), [u'Paragraph', 'Marta'])
def test_get_xpath(self):
l = TestItemLoader(response=self.response)
self.assertEqual(l.get_xpath('//p/text()'), [u'paragraph'])
self.assertEqual(l.get_xpath('//p/text()', TakeFirst()), u'paragraph')
self.assertEqual(l.get_xpath('//p/text()', TakeFirst(), re='pa'), u'pa')
self.assertEqual(l.get_xpath(['//p/text()', '//div/text()']), [u'paragraph', 'marta'])
def test_replace_xpath_multi_fields(self):
l = TestItemLoader(response=self.response)
l.add_xpath(None, '//div/text()', TakeFirst(), lambda x: {'name': x})
self.assertEqual(l.get_output_value('name'), [u'Marta'])
l.replace_xpath(None, '//p/text()', TakeFirst(), lambda x: {'name': x})
self.assertEqual(l.get_output_value('name'), [u'Paragraph'])
def test_replace_xpath_re(self):
l = TestItemLoader(response=self.response)
self.assert_(l.selector)
l.add_xpath('name', '//div/text()')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
l.replace_xpath('name', '//div/text()', re='ma')
self.assertEqual(l.get_output_value('name'), [u'Ma'])
def test_add_css_re(self):
l = TestItemLoader(response=self.response)
l.add_css('name', 'div::text', re='ma')
self.assertEqual(l.get_output_value('name'), [u'Ma'])
l.add_css('url', 'a::attr(href)', re='http://(.+)')
self.assertEqual(l.get_output_value('url'), [u'www.scrapy.org'])
def test_replace_css(self):
l = TestItemLoader(response=self.response)
self.assert_(l.selector)
l.add_css('name', 'div::text')
self.assertEqual(l.get_output_value('name'), [u'Marta'])
l.replace_css('name', 'p::text')
self.assertEqual(l.get_output_value('name'), [u'Paragraph'])
l.replace_css('name', ['p::text', 'div::text'])
self.assertEqual(l.get_output_value('name'), [u'Paragraph', 'Marta'])
l.add_css('url', 'a::attr(href)', re='http://(.+)')
self.assertEqual(l.get_output_value('url'), [u'www.scrapy.org'])
l.replace_css('url', 'img::attr(src)')
self.assertEqual(l.get_output_value('url'), [u'/images/logo.png'])
def test_get_css(self):
l = TestItemLoader(response=self.response)
self.assertEqual(l.get_css('p::text'), [u'paragraph'])
self.assertEqual(l.get_css('p::text', TakeFirst()), u'paragraph')
self.assertEqual(l.get_css('p::text', TakeFirst(), re='pa'), u'pa')
self.assertEqual(l.get_css(['p::text', 'div::text']), [u'paragraph', 'marta'])
self.assertEqual(l.get_css(['a::attr(href)', 'img::attr(src)']),
[u'http://www.scrapy.org', u'/images/logo.png'])
def test_replace_css_multi_fields(self):
l = TestItemLoader(response=self.response)
l.add_css(None, 'div::text', TakeFirst(), lambda x: {'name': x})
self.assertEqual(l.get_output_value('name'), [u'Marta'])
l.replace_css(None, 'p::text', TakeFirst(), lambda x: {'name': x})
self.assertEqual(l.get_output_value('name'), [u'Paragraph'])
l.add_css(None, 'a::attr(href)', TakeFirst(), lambda x: {'url': x})
self.assertEqual(l.get_output_value('url'), [u'http://www.scrapy.org'])
l.replace_css(None, 'img::attr(src)', TakeFirst(), lambda x: {'url': x})
self.assertEqual(l.get_output_value('url'), [u'/images/logo.png'])
def test_replace_css_re(self):
l = TestItemLoader(response=self.response)
self.assert_(l.selector)
l.add_css('url', 'a::attr(href)')
self.assertEqual(l.get_output_value('url'), [u'http://www.scrapy.org'])
l.replace_css('url', 'a::attr(href)', re='http://www\.(.+)')
self.assertEqual(l.get_output_value('url'), [u'scrapy.org'])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
soldag/home-assistant | tests/components/cover/test_intent.py | 24 | 1584 | """The tests for the cover platform."""
from homeassistant.components.cover import (
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
intent as cover_intent,
)
from homeassistant.helpers import intent
from tests.common import async_mock_service
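# async_mock_service() registers a stub handler for the given service and
# returns the list of calls made to it, so the tests below can assert on
# service invocations without a real cover integration.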
async def test_open_cover_intent(hass):
"""Test HassOpenCover intent."""
await cover_intent.async_setup_intents(hass)
hass.states.async_set("cover.garage_door", "closed")
calls = async_mock_service(hass, "cover", SERVICE_OPEN_COVER)
response = await intent.async_handle(
hass, "test", "HassOpenCover", {"name": {"value": "garage door"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Opened garage door"
assert len(calls) == 1
call = calls[0]
assert call.domain == "cover"
assert call.service == "open_cover"
assert call.data == {"entity_id": "cover.garage_door"}
async def test_close_cover_intent(hass):
"""Test HassCloseCover intent."""
await cover_intent.async_setup_intents(hass)
hass.states.async_set("cover.garage_door", "open")
calls = async_mock_service(hass, "cover", SERVICE_CLOSE_COVER)
response = await intent.async_handle(
hass, "test", "HassCloseCover", {"name": {"value": "garage door"}}
)
await hass.async_block_till_done()
assert response.speech["plain"]["speech"] == "Closed garage door"
assert len(calls) == 1
call = calls[0]
assert call.domain == "cover"
assert call.service == "close_cover"
assert call.data == {"entity_id": "cover.garage_door"}
| apache-2.0 |
joshumax/CoLinux64 | src/build.comake.py | 1 | 4278 | # This is not a standalone Python script, but a build declaration file
# to be read by bin/make.py. Please run bin/make.py --help.
import os
from comake.settings import settings
settings.arch = os.getenv('COLINUX_HOST_ARCH')
if not settings.arch:
settings.arch = 'i386'
print "Target architecture not specified, defaulting to %s" % (settings.arch, )
current_arch_symlink = target_pathname(pathjoin('colinux', 'arch', 'current'))
if os.path.exists(current_arch_symlink):
os.unlink(current_arch_symlink)
os.symlink(settings.arch, current_arch_symlink)
settings.host_os = os.getenv('COLINUX_HOST_OS')
if not settings.host_os:
settings.host_os = 'winnt'
print "Target OS not specified, defaulting to %s" % (settings.host_os, )
current_os_symlink = target_pathname(pathjoin('colinux', 'os', 'current'))
if os.path.exists(current_os_symlink):
os.unlink(current_os_symlink)
os.symlink(settings.host_os, current_os_symlink)
settings.cflags = os.getenv('COLINUX_CFLAGS')
if not settings.cflags:
settings.cflags = ''
settings.lflags = os.getenv('COLINUX_LFLAGS')
if not settings.lflags:
settings.lflags = ''
# Set up "i686-co-linux" in case the local gcc can't be used for the Linux kernel
settings.gcc_guest_target = os.getenv('COLINUX_GCC_GUEST_TARGET')
compiler_defines = dict(
COLINUX_FILE_ID='0',
COLINUX=None,
CO_HOST_API=None,
COLINUX_DEBUG=None,
COLINUX_HOST_ARCH=settings.host_os,
)
if settings.host_os == 'winnt':
if settings.arch == 'x86_64':
settings.gcc_host_target = 'x86_64-w64-mingw32'
compiler_flags = ['-D_AMD64_', '-fms-extensions']
else:
settings.gcc_host_target = 'i686-pc-mingw32'
compiler_flags = ['-mpush-args', '-mno-accumulate-outgoing-args']
compiler_defines['WINVER'] = '0x0500'
cross_compilation_prefix = settings.gcc_host_target + '-'
else:
if settings.gcc_guest_target:
cross_compilation_prefix = settings.gcc_guest_target + '-'
else:
cross_compilation_prefix = ''
compiler_flags = []
settings.target_kernel_source = getenv('COLINUX_TARGET_KERNEL_SOURCE')
if not settings.target_kernel_source:
settings.target_kernel_source = getenv('COLINUX_TARGET_KERNEL_PATH')
if not settings.target_kernel_source:
print
print "COLINUX_TARGET_KERNEL_PATH not set. Please set this environment variable to the"
print "pathname of a coLinux-enabled kernel source tree, i.e, a Linux kernel tree that"
print "is patched with the patch file which is under the patch/ directory."
raise BuildCancelError()
settings.target_kernel_build = getenv('COLINUX_TARGET_KERNEL_BUILD')
# Handle headers from in source and out of tree builds
if not settings.target_kernel_build:
settings.target_kernel_build = settings.target_kernel_source
if settings.target_kernel_build == settings.target_kernel_source:
settings.target_kernel_includes = [
pathjoin(settings.target_kernel_source, 'include') ]
else:
settings.target_kernel_includes = [
pathjoin(settings.target_kernel_build, 'include'),
]
x = pathjoin(settings.target_kernel_build, 'include2')
if os.path.exists(x):
settings.target_kernel_includes += [ x ]
x = pathjoin(settings.target_kernel_source, 'arch/x86/include')
if os.path.exists(x):
settings.target_kernel_includes += [ x ]
settings.target_kernel_includes += [ pathjoin(settings.target_kernel_source, 'include') ]
if not hasattr(settings, 'final_build_target'):
settings.final_build_target = 'executables'
targets['build'] = Target(
inputs=[Input('colinux/os/%s/build/%s' % (settings.host_os,
settings.final_build_target))],
options=Options(
overriders=dict(
cross_compilation_prefix=cross_compilation_prefix,
),
appenders=dict(
compiler_flags=[
'-Wno-trigraphs', '-fno-strict-aliasing', '-Wall',
settings.cflags,
] + compiler_flags,
linker_flags=[
settings.lflags,
],
compiler_includes=[
'src',
] + settings.target_kernel_includes,
compiler_defines=compiler_defines,
)
),
tool = Empty(),
)
| gpl-2.0 |
ariegg/webiopi-drivers | buses/i2c/mcp2221/i2cmcpwindll.py | 1 | 4154 | # Copyright 2016 Andreas Riegg - t-h-i-n-x.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
#
# Changelog
#
# 1.0 2016-06-27 Initial release.
#
# 1.1 2016-07-28 Added compatibility with slave address detect feature from WebIOPi 0.7.22
#
# Implementation and usage remarks
#
# Implements I2C device connectivity using the MCP2221 USB <-> I2C chip.
#
# This version is for I2C MCP2221 WinDLL usage on Windows OS.
# CAUTION: This version does not work on any LINUX OS!!
# The Microchip DLL works with 8-bit slave addresses only, so the 7-bit
# slave address is converted automatically.
#
# A singleton for the opened DLL object named MCPDLL is used here.
#
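# Usage sketch (illustrative; the slave address, register values and DLL
# location are assumptions, not taken from this file):
#
#   bus = I2C_MCP2221_WINDLL(slave=0x48, speed=100000,
#                            dllpath="C:\\mcp2221\\", dllname="MCP2221DLL-UM_x86")
#   bus.open()
#   data = bus.readBytes(2)
#   bus.writeBytes([0x01, 0xFF])
#   bus.close()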
from webiopi.devices.bus import Bus, I2C_Bus
from webiopi.utils.types import toint
from webiopi.utils.logger import debug
import ctypes
from ctypes import c_ubyte, c_uint
MCPDLL = None
class I2C_MCP2221_WINDLL(I2C_Bus):
def __init__(self, slave, speed=100000, dev="windll:", dllpath="", dllname="MCP2221DLL-UM_x86"):
self.slave = slave
self.speed = toint(speed)
if self.speed > 400000:
            raise ValueError("Maximum I2C speed for MCP2221 is 400,000.0 Hz (%s Hz was given)" % '{:,.1f}'.format(self.speed))
self.dllpath = dllpath
self.dllname = dllname
Bus.__init__(self, "I2C", dev + dllpath + dllname)
I2C_Bus.__init__(self, slave)
debug("Attached I2C device - %s" % self.__str__())
def __str__(self):
return "%s (slave=0x%02X speed=%s dev=%s)" % (self.__class__.__name__, self.slave, '{:,.1f}'.format(self.speed), self.device)
#---------- BUS open() and close() reimplementation to handle dll file singleton ----------
# TODO: Correct handling of open/close with multiple slaves dynamically
def open(self):
global MCPDLL
if MCPDLL is None:
debug("Opening I2C bus device - %s(dev=%s)" % (self.__class__.__name__, self.device))
try:
MCPDLL = ctypes.WinDLL(self.dllpath + self.dllname)
except WindowsError:
raise Exception("Cannot load library %s" % self.device)
MCPDLL.DllInit()
isConnected = MCPDLL.GetConnectionStatus()
if not isConnected:
raise Exception("Cannot connect to MCP2221 chip via %s" % self.device)
def close(self):
global MCPDLL
if MCPDLL is not None:
MCPDLL.StopI2cDataTransfer()
debug("Closing I2C bus device - %s(dev=%s)" % (self.__class__.__name__, self.device))
handle = MCPDLL._handle
ctypes.windll.kernel32.FreeLibrary(handle)
MCPDLL = None
I2C_Bus.close(self)
#---------- Basic read/write communication via MCP2221 DLL API calls ----------
def readBytes(self, size=1):
global MCPDLL
buff = (c_ubyte * size)(*bytearray(size))
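        # The DLL expects an 8-bit address: shift the 7-bit slave address left
        # one bit and set the read/write flag in bit 0 (1 = read, 0 = write).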
success = MCPDLL.ReadI2cData(c_ubyte((self.slave << 1) + 1), buff, c_uint(size), c_uint(self.speed))
if success < 0: #Todo: some returncodes may be worth a retry
raise Exception("Read error %s returncode: %s" % (self.device, success))
return bytearray(buff)
def writeBytes(self, data):
global MCPDLL
size = len(data)
success = MCPDLL.WriteI2cData(c_ubyte(self.slave << 1), (c_ubyte * size)(*data), c_uint(size), c_uint(self.speed))
if success < 0: #Todo: some returncodes may be worth a retry
raise Exception("Write error %s returncode: %s" % (self.device, success))
| apache-2.0 |
xiaoyaozi5566/DiamondCache | src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_with_duplication.py | 91 | 2495 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop MOVDDUP_XMM_XMM {
movfp xmmh, xmmlm, dataSize=8
movfp xmml, xmmlm, dataSize=8
};
def macroop MOVDDUP_XMM_M {
ldfp xmml, seg, sib, disp, dataSize=8
movfp xmmh, xmml, dataSize=8
};
def macroop MOVDDUP_XMM_P {
rdip t7
ldfp xmml, seg, riprel, disp, dataSize=8
movfp xmmh, xmml, dataSize=8
};
# MOVSLDUP
# MOVSHDUP
'''
| bsd-3-clause |
jimberlage/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/base.py | 4 | 5534 | import os
import platform
import socket
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from ..wptcommandline import require_arg # noqa: F401
here = os.path.split(__file__)[0]
def inherit(super_module, child_globals, product_name):
super_wptrunner = super_module.__wptrunner__
child_globals["__wptrunner__"] = child_wptrunner = deepcopy(super_wptrunner)
child_wptrunner["product"] = product_name
for k in ("check_args", "browser", "browser_kwargs", "executor_kwargs",
"env_extras", "env_options"):
attr = super_wptrunner[k]
child_globals[attr] = getattr(super_module, attr)
for v in super_module.__wptrunner__["executor"].values():
child_globals[v] = getattr(super_module, v)
if "run_info_extras" in super_wptrunner:
attr = super_wptrunner["run_info_extras"]
child_globals[attr] = getattr(super_module, attr)
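# Illustrative use (the module name is an assumption, not from this file): a
# derived product module can reuse another browser's implementation wholesale:
#
#   from . import chrome
#   inherit(chrome, globals(), "chrome_custom")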
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
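# For example, cmd_arg("marionette") yields "--marionette" ("-marionette" on
# Windows), and cmd_arg("profile", "/tmp/p") yields "--profile=/tmp/p".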
def get_free_port(start_port, exclude=None):
"""Get the first port number after start_port (inclusive) that is
not currently bound.
:param start_port: Integer port number at which to start testing.
:param exclude: Set of port numbers to skip"""
port = start_port
while True:
if exclude and port in exclude:
port += 1
continue
s = socket.socket()
try:
s.bind(("127.0.0.1", port))
except socket.error:
port += 1
else:
return port
finally:
s.close()
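# Example (illustrative): grab two distinct free ports starting at 8000.
#
#   first = get_free_port(8000)
#   second = get_free_port(8000, exclude={first})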
def browser_command(binary, args, debug_info):
if debug_info:
if debug_info.requiresEscapedArgs:
args = [item.replace("&", "\\&") for item in args]
debug_args = [debug_info.path] + debug_info.args
else:
debug_args = []
command = [binary] + args
return debug_args, command
class BrowserError(Exception):
pass
class Browser(object):
__metaclass__ = ABCMeta
process_cls = None
init_timeout = 30
def __init__(self, logger):
"""Abstract class serving as the basis for Browser implementations.
The Browser is used in the TestRunnerManager to start and stop the browser
process, and to check the state of that process. This class also acts as a
context manager, enabling it to do browser-specific setup at the start of
the testrun and cleanup after the run is complete.
:param logger: Structured logger to use for output.
"""
self.logger = logger
def __enter__(self):
self.setup()
return self
def __exit__(self, *args, **kwargs):
self.cleanup()
def setup(self):
"""Used for browser-specific setup that happens at the start of a test run"""
pass
def settings(self, test):
return {}
@abstractmethod
    def start(self, group_metadata, **kwargs):
        """Launch the browser object and get it into a state where it is ready to run tests"""
pass
@abstractmethod
def stop(self, force=False):
"""Stop the running browser process."""
pass
@abstractmethod
def pid(self):
"""pid of the browser process or None if there is no pid"""
pass
@abstractmethod
def is_alive(self):
"""Boolean indicating whether the browser process is still running"""
pass
def setup_ssl(self, hosts):
"""Return a certificate to use for tests requiring ssl that will be trusted by the browser"""
raise NotImplementedError("ssl testing not supported")
def cleanup(self):
"""Browser-specific cleanup that is run after the testrun is finished"""
pass
def executor_browser(self):
"""Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
with which it should be instantiated"""
return ExecutorBrowser, {}
def check_for_crashes(self):
"""Check for crashes that didn't cause the browser process to terminate"""
return False
    def log_crash(self, process, test):
        """Return a list of dictionaries containing information about crashes that happened
in the browser, or an empty list if no crashes occurred"""
self.logger.crash(process, test)
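# Usage sketch (illustrative): concrete subclasses implement start/stop/pid/
# is_alive, and the test runner drives them as context managers:
#
#   with SomeBrowser(logger) as browser:
#       browser.start(group_metadata={})
#       ...
#       browser.stop(force=True)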
class NullBrowser(Browser):
def __init__(self, logger, **kwargs):
super(NullBrowser, self).__init__(logger)
def start(self, **kwargs):
"""No-op browser to use in scenarios where the TestRunnerManager shouldn't
actually own the browser process (e.g. Servo where we start one browser
per test)"""
pass
def stop(self, force=False):
pass
def pid(self):
return None
def is_alive(self):
return True
def on_output(self, line):
raise NotImplementedError
class ExecutorBrowser(object):
def __init__(self, **kwargs):
"""View of the Browser used by the Executor object.
This is needed because the Executor runs in a child process and
we can't ship Browser instances between processes on Windows.
Typically this will have a few product-specific properties set,
but in some cases it may have more elaborate methods for setting
up the browser from the runner process.
"""
for k, v in kwargs.iteritems():
setattr(self, k, v)
| mpl-2.0 |
rockaboxmedia/pexpect | tests/pexpectTest.py | 3 | 1316 | #!/usr/bin/env python
import os, time, pexpect, sys
def getProcessResults(cmd, timeLimit=20):
"""
executes 'cmd' as a child process and returns the child's output,
the duration of execution, and the process exit status. Aborts if
child process does not generate output for 'timeLimit' seconds.
"""
output = ""
startTime = time.time()
child = pexpect.spawn(cmd, timeout=10)
child.logfile = sys.stdout
while 1:
try:
            # read_nonblocking will add to 'output' one byte at a time
# newlines can show up as '\r\n' so we kill any '\r's which
# will mess up the formatting for the viewer
output += child.read_nonblocking(timeout=timeLimit).replace("\r","")
except pexpect.EOF, e:
print str(e)
# process terminated normally
break
except pexpect.TIMEOUT, e:
print str(e)
output += "\nProcess aborted by FlashTest after %s seconds.\n" % timeLimit
print child.isalive()
child.kill(9)
break
endTime = time.time()
child.close(force=True)
duration = endTime - startTime
exitStatus = child.exitstatus
return (output, duration, exitStatus)
cmd = "./ticker.py"
result, duration, exitStatus = getProcessResults(cmd)
print "result: %s" % result
print "duration: %s" % duration
print "exit-status: %s" % exitStatus
| mit |
SpamExperts/OrangeAssassin | oa/plugins/base.py | 2 | 6639 | """Base for PAD plugins."""
from __future__ import absolute_import
from builtins import tuple
from builtins import object
try:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
except ImportError:
create_engine = None
sessionmaker = None
from collections import defaultdict
import oa.conf
def dbi_to_mysql(dsn, user, password):
conection_dates = defaultdict(int)
dummy, driver, connection = dsn.split(":", 2)
if driver.lower() == "mysql":
driver = "mysql"
db_name, hostname = connection.split(":", 1)
conection_dates["driver"] = driver
conection_dates["hostname"] = hostname
conection_dates["db_name"] = db_name
if not user or not password:
return conection_dates
conection_dates["user"] = user
conection_dates["password"] = password
return conection_dates
def dbi_to_alchemy(dsn, user, password):
"""Convert perl DBI setting to SQLAlchemy settings."""
dummy, driver, connection = dsn.split(":", 2)
if driver.lower() == "mysql":
driver = "mysql+pymysql"
db_name, hostname = connection.split(":", 1)
elif driver.lower() == "pg":
driver = "postgresql"
values = dict(item.split("=") for item in connection.split(";"))
db_name = values.get("dbname", "spamassassin")
hostname = values.get("host", "localhost")
if "port" in values:
hostname = "%s:%s" % (hostname, values["port"])
elif driver.lower() == "sqlite":
driver = "sqlite"
user, password, hostname = "", "", ""
values = dict(item.split("=") for item in connection.split(";"))
db_name = values["dbname"]
else:
return ""
if not user or not password:
return "%s://%s/%s" % (driver, hostname, db_name)
return "%s://%s:%s@%s/%s" % (driver, user, password, hostname, db_name)
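# Illustrative conversions (hypothetical DSNs, not taken from the original
# source):
#
#   dbi_to_alchemy("DBI:mysql:spamassassin:localhost", "u", "p")
#       -> "mysql+pymysql://u:p@localhost/spamassassin"
#   dbi_to_alchemy("DBI:SQLite:dbname=/tmp/sa.db", "", "")
#       -> "sqlite:///tmp/sa.db"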
class BasePlugin(oa.conf.Conf, object):
"""Abstract class for plugins. All plugins must inherit from this class.
This exposes methods to methods to store data and configuration options
in the "global" context and the "local" context.
* The "global" context is loaded once when the configuration is parsed
and persists throughout until the plugin is reloaded.
* The "local" context is stored per message and each new message parsed
has its one context.
The methods automatically stores the data under the plugin names to ensure
that there are no name clashes between plugins.
The plugin can also define eval rules by implementing a method and adding
it to the eval_rules list. These will be registered after the plugin has
been initialized.
"""
eval_rules = tuple()
# Defines any new rules that the plugins implements.
cmds = None
# See oa.conf.Conf for details on options.
options = None
# Database connection fields, each plugin should set their own if they need them
dsn = None
sql_username = ""
sql_password = ""
def __init__(self, ctxt):
self.path_to_plugin = None
super(BasePlugin, self).__init__(ctxt)
    def finish_parsing_start(self, results):
        """Called when the configuration parsing has finished, but before
        the ruleset has actually been initialized from the parsed data.
This can be used to insert new data after parsing.
:param results: A dictionary that maps the rule names to the
rest of the data extracted from the configuration (e.g. the
score, description etc.)
:return: Nothing
"""
    # XXX The method name for this is horrible, but it's likely better to keep
    # XXX it the same as SA.
def finish_parsing_end(self, ruleset):
"""Called when the configuration parsing has finished, but before the
post-parsing. This hook can be used for e.g. to add rules to the
ruleset.
By default this prepares the SQLAlchemy engine if the plugin has any
set.
"""
connect_string = None
self["engine"] = None
if self.dsn:
if self.dsn.upper().startswith("DBI"):
# Convert from SA format.
user = self.sql_username
password = self.sql_password
if not create_engine:
self["engine"] = dbi_to_mysql(self.dsn, user, password)
else:
connect_string = dbi_to_alchemy(self.dsn, user, password)
elif self.dsn:
# The connect string is already in the correct format
connect_string = self.dsn
if connect_string is not None:
self["engine"] = create_engine(connect_string)
def get_engine(self):
return self["engine"]
def get_session(self):
"""Open a new SQLAlchemy session."""
engine = self["engine"]
return sessionmaker(bind=engine)()
def check_start(self, msg):
"""Called before the metadata is extracted from the message. The
message object passed will only have raw_msg and msg available.
May be overridden.
"""
def extract_metadata(self, msg, payload, text, part):
"""Called while the message metadata is extracted for every message
part. If the part contains text, corresponding payload is provided,
else it will be None.
May be overridden.
"""
def parsed_metadata(self, msg):
"""The message has been parsed and all the information can be accessed
by the plugin.
May be overridden.
"""
def check_end(self, ruleset, msg):
"""The message check operation has just finished, and the results are
about to be returned to the caller
May be overridden.
"""
def auto_learn_discriminator(self, ruleset, msg):
"""All message operations have finished and it can be checked for
submission to autolearning systems
May be overridden.
"""
def plugin_report(self, msg):
"""Called when a message should be reported as spam.
May be overridden.
"""
def plugin_revoke(self, msg):
"""Called when a message should be reported as ham.
May be overridden.
"""
    def parse_config(self, key, value):
        """Parse a config line that the normal parser doesn't know how to
interpret.
Use self.inhibit_further_callbacks to stop other plugins from
processing this line.
May be overridden.
"""
super(BasePlugin, self).parse_config(key, value)
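# Minimal plugin sketch (illustrative only; the option and eval-rule shapes
# here are assumptions, not taken from this file):
#
#   class ExamplePlugin(BasePlugin):
#       eval_rules = ("check_example",)
#       options = {"example_option": ("str", "default value")}
#
#       def check_example(self, msg, target=None):
#           return "example" in msg.raw_msg.lower()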
| apache-2.0 |
goddino/libjingle | trunk/build/android/pylib/chrome_test_server_spawner.py | 8 | 16822 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
import constants
import ports
# Paths that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.DIR_SOURCE_ROOT, 'third_party'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
'src'),
os.path.join(constants.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
os.path.join(constants.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', # Sync uses its own script, and doesn't take a server type arg.
'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
"""Blocks until the provided predicate (function) is true.
Returns:
Whether the provided predicate was satisfied once (before the timeout).
"""
sleep_time_sec = 0.025
for attempt in xrange(1, max_attempts):
if predicate():
return True
time.sleep(sleep_time_sec)
sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
return False
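# Example (illustrative): poll for a file using the default attempt limit and
# exponential backoff.
#
#   import os.path
#   ready = _WaitUntil(lambda: os.path.exists('/tmp/ready'))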
def _CheckPortStatus(port, expected_status):
"""Returns True if port has expected_status.
Args:
port: the port number.
expected_status: boolean of expected status.
Returns:
Returns True if the status is expected. Otherwise returns False.
"""
return _WaitUntil(lambda: ports.IsHostPortUsed(port) == expected_status)
def _CheckDevicePortStatus(adb, port):
"""Returns whether the provided port is used."""
return _WaitUntil(lambda: ports.IsDevicePortUsed(adb, port))
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
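# For example, _GetServerTypeCommandLine('ftp') returns '-f', while
# _GetServerTypeCommandLine('http') returns '' (no extra flag is needed).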
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, adb, tool, forwarder, build_type):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
adb: instance of AndroidCommands.
tool: instance of runtime error detection tool.
forwarder: instance of Forwarder.
build_type: 'Release' or 'Debug'.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.adb = adb
self.tool = tool
    self.process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
self._test_server_forwarder = forwarder
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.command_line = []
self.build_type = build_type
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
      logging.error('Timed out waiting for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortStatus(self.host_port, True)
logging.error('Failed to get port information from the server data.')
return False
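  # Illustrative counterpart (an assumption about the test server, not code
  # from this file): the server side writes the startup handshake roughly as
  #
  #   payload = json.dumps({'port': port})
  #   os.write(startup_pipe, struct.pack('=L', len(payload)) + payload)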
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
# The following arguments must exist.
type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
if type_cmd:
self.command_line.append(type_cmd)
self.command_line.append('--port=%d' % self.host_port)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
self.command_line.append('--host=%s' % self.arguments['host'])
data_dir = self.arguments['data-dir'] or 'chrome/test/data'
if not os.path.isabs(data_dir):
data_dir = os.path.join(constants.DIR_SOURCE_ROOT, data_dir)
self.command_line.append('--data-dir=%s' % data_dir)
# The following arguments are optional depending on the individual test.
if self.arguments.has_key('log-to-console'):
self.command_line.append('--log-to-console')
if self.arguments.has_key('auth-token'):
self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
if self.arguments.has_key('https'):
self.command_line.append('--https')
if self.arguments.has_key('cert-and-key-file'):
self.command_line.append('--cert-and-key-file=%s' % os.path.join(
constants.DIR_SOURCE_ROOT, self.arguments['cert-and-key-file']))
if self.arguments.has_key('ocsp'):
self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
if self.arguments.has_key('https-record-resume'):
self.command_line.append('--https-record-resume')
if self.arguments.has_key('ssl-client-auth'):
self.command_line.append('--ssl-client-auth')
if self.arguments.has_key('tls-intolerant'):
self.command_line.append('--tls-intolerant=%s' %
self.arguments['tls-intolerant'])
if self.arguments.has_key('ssl-client-ca'):
for ca in self.arguments['ssl-client-ca']:
self.command_line.append('--ssl-client-ca=%s' %
os.path.join(constants.DIR_SOURCE_ROOT, ca))
if self.arguments.has_key('ssl-bulk-cipher'):
for bulk_cipher in self.arguments['ssl-bulk-cipher']:
self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.DIR_SOURCE_ROOT
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
self.process = subprocess.Popen(command)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
self._test_server_forwarder.Run(
[(0, self.host_port)], self.tool, '127.0.0.1')
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = self._test_server_forwarder.DevicePortForHostPort(
self.host_port)
if device_port and _CheckDevicePortStatus(self.adb, device_port):
self.is_ready = True
self.forwarder_device_port = device_port
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
_WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
if self.process.poll() is None:
self.process.kill()
self._test_server_forwarder.UnmapDevicePort(self.forwarder_device_port)
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
self.server.tool,
self.server.forwarder,
self.server.build_type)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while starting a test server.')
def _KillTestServer(self):
"""Stops the test server instance."""
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortStatus(port, False):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while killing a test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encountered an unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encountered an unknown request: %s.', action)
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, adb, tool, forwarder,
build_type):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.server.adb = adb
self.server.tool = tool
self.server.forwarder = forwarder
self.server.test_server_instance = None
self.server.build_type = build_type
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
"""Starts the test server spawner."""
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
def Stop(self):
"""Stops the test server spawner.
Also cleans the server state.
"""
self.CleanupState()
self.server.shutdown()
def CleanupState(self):
"""Cleans up the spawning server state.
This should be called if the test server spawner is reused,
to avoid sharing the test server instance.
"""
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
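# Usage sketch (illustrative arguments): create the spawner, start it for the
# test run, and stop it when the run finishes.
#
#   spawner = SpawningServer(8001, adb, tool, forwarder, 'Release')
#   spawner.Start()
#   ...
#   spawner.Stop()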
| bsd-3-clause |
nijinashok/sos | sos/plugins/activemq.py | 1 | 1722 | # Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class ActiveMq(Plugin, DebianPlugin):
"""ActiveMQ message broker
"""
plugin_name = 'activemq'
profiles = ('openshift',)
packages = ('activemq', 'activemq-core')
files = ('/var/log/activemq',)
def setup(self):
if self.get_option("all_logs"):
self.add_copy_spec(list(self.files))
else:
self.add_copy_spec([
"/var/log/activemq/activemq.log",
"/var/log/activemq/wrapper.log"
])
def postproc(self):
# activemq.xml contains credentials in this form:
# <authenticationUser ... password="changeme" ... />
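        # after scrubbing it reads: <authenticationUser ... password="******" ... />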
self.do_file_sub(
'/etc/activemq/activemq.xml',
r'(\s*password=")[^"]*(".*)',
r"\1******\2"
)
class RedHatActiveMq(ActiveMq, RedHatPlugin):
def setup(self):
super(RedHatActiveMq, self).setup()
self.add_copy_spec([
'/etc/sysconfig/activemq',
'/etc/activemq/activemq.xml'
])
class UbuntuActiveMq(ActiveMq, UbuntuPlugin):
def setup(self):
super(UbuntuActiveMq, self).setup()
self.add_copy_spec([
'/etc/activemq',
'/etc/default/activemq'
])
| gpl-2.0 |
redhat-performance/tuned | tests/unit/monitors/test_base.py | 1 | 2873 | import unittest
import tuned.monitors.base
class MockMonitor(tuned.monitors.base.Monitor):
@classmethod
def _init_available_devices(cls):
cls._available_devices = set(["a", "b"])
@classmethod
def update(cls):
for device in ["a", "b"]:
cls._load.setdefault(device, 0)
cls._load[device] += 1
class MonitorBaseClassTestCase(unittest.TestCase):
def test_fail_base_class_init(self):
with self.assertRaises(NotImplementedError):
tuned.monitors.base.Monitor()
def test_update_fail_with_base_class(self):
with self.assertRaises(NotImplementedError):
tuned.monitors.base.Monitor.update()
def test_available_devices(self):
monitor = MockMonitor()
devices = MockMonitor.get_available_devices()
self.assertEqual(devices, set(["a", "b"]))
monitor.cleanup()
def test_registering_instances(self):
monitor = MockMonitor()
self.assertIn(monitor, MockMonitor.instances())
monitor.cleanup()
self.assertNotIn(monitor, MockMonitor.instances())
def test_init_with_devices(self):
monitor = MockMonitor()
self.assertSetEqual(set(["a", "b"]), monitor.devices)
monitor.cleanup()
monitor = MockMonitor(["a"])
self.assertSetEqual(set(["a"]), monitor.devices)
monitor.cleanup()
monitor = MockMonitor([])
self.assertSetEqual(set(), monitor.devices)
monitor.cleanup()
monitor = MockMonitor(["b", "x"])
self.assertSetEqual(set(["b"]), monitor.devices)
monitor.cleanup()
def test_add_device(self):
monitor = MockMonitor(["a"])
self.assertSetEqual(set(["a"]), monitor.devices)
monitor.add_device("x")
self.assertSetEqual(set(["a"]), monitor.devices)
monitor.add_device("b")
self.assertSetEqual(set(["a", "b"]), monitor.devices)
monitor.cleanup()
def test_remove_device(self):
monitor = MockMonitor()
self.assertSetEqual(set(["a", "b"]), monitor.devices)
monitor.remove_device("a")
self.assertSetEqual(set(["b"]), monitor.devices)
monitor.remove_device("x")
self.assertSetEqual(set(["b"]), monitor.devices)
monitor.remove_device("b")
self.assertSetEqual(set(), monitor.devices)
monitor.cleanup()
def test_get_load_from_enabled(self):
monitor = MockMonitor()
load = monitor.get_load()
self.assertIn("a", load)
self.assertIn("b", load)
monitor.remove_device("a")
load = monitor.get_load()
self.assertNotIn("a", load)
self.assertIn("b", load)
monitor.remove_device("b")
load = monitor.get_load()
self.assertDictEqual({}, load)
monitor.cleanup()
def test_refresh_of_updating_devices(self):
monitor1 = MockMonitor(["a"])
self.assertSetEqual(set(["a"]), MockMonitor._updating_devices)
monitor2 = MockMonitor(["a", "b"])
self.assertSetEqual(set(["a", "b"]), MockMonitor._updating_devices)
monitor1.cleanup()
self.assertSetEqual(set(["a", "b"]), MockMonitor._updating_devices)
monitor2.cleanup()
self.assertSetEqual(set(), MockMonitor._updating_devices)
| gpl-2.0 |
mscuthbert/abjad | abjad/tools/handlertools/RepeatedMarkupHandler.py | 2 | 1215 | # -*- encoding: utf-8 -*-
from abjad.tools import datastructuretools
from abjad.tools import markuptools
from abjad.tools import scoretools
from abjad.tools.handlertools.ArticulationHandler import ArticulationHandler
class RepeatedMarkupHandler(ArticulationHandler):
r'''Repeated markup handler.
'''
### CLASS ATTRIBUTES ###
__slots__ = (
'_markups',
)
### INITIALIZER ###
def __init__(self, markups=None):
if markups is not None:
markups = [markuptools.Markup(_) for _ in markups]
markups = tuple(markups)
self._markups = markups
### SPECIAL METHODS ###
def __call__(self, expr):
r'''Calls handler on `expr`.
Returns none.
'''
classes = (scoretools.Note, scoretools.Chord)
markups = datastructuretools.CyclicTuple(self.markups)
for i, leaf in enumerate(
scoretools.iterate_components_forward_in_expr(expr, classes)):
            markup = markups[i]
markup = markuptools.Markup(markup)
markup(leaf)
### PUBLIC PROPERTIES ###
@property
def markups(self):
r'''Gets markups of handler.
Returns tuple or none.
'''
        return self._markups
 | gpl-3.0 |
dit/dit | tests/pid/test_imin.py | 1 | 1262 | """
Tests for dit.pid.imin.
"""
import pytest
from dit.pid.measures.imin import PID_WB
from dit.pid.distributions import bivariates, trivariates
def test_pid_wb1():
"""
Test imin on a generic distribution.
"""
d = bivariates['prob. 1']
pid = PID_WB(d, ((0,), (1,)), (2,))
assert pid[((0,), (1,))] == pytest.approx(0.019973094021974794)
assert pid[((0,),)] == pytest.approx(0.15097750043269376)
assert pid[((1,),)] == pytest.approx(0.0)
assert pid[((0, 1),)] == pytest.approx(0.0)
def test_pid_wb2():
"""
Test imin on another generic distribution.
"""
d = trivariates['sum']
pid = PID_WB(d, [[0], [1], [2]], [3])
for atom in pid._lattice:
if atom == ((0,), (1,), (2,)):
assert pid[atom] == pytest.approx(0.31127812445913294)
elif atom == ((0, 1), (0, 2), (1, 2)):
assert pid[atom] == pytest.approx(0.5)
elif atom == ((0, 1, 2),):
assert pid[atom] == pytest.approx(1.0)
else:
assert pid[atom] == pytest.approx(0.0)
def test_pid_wb3():
"""
Test imin on a generic distribution.
"""
d = bivariates['jeff']
pid = PID_WB(d)
assert pid.complete
assert pid.nonnegative
assert pid.consistent
| bsd-3-clause |