repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
4022321818/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testhelpers.py | 737 | 25793 | import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
    """Spec target used throughout this module: three methods whose
    signatures differ (two positional args, no args, one optional arg)
    so autospec'd mocks can be checked for signature enforcement."""
    def one(self, a, b):
        pass
    def two(self):
        pass
    def three(self, a=None):
        pass
class AnyTest(unittest.TestCase):
    """Tests for mock.ANY, the sentinel that compares equal to anything."""

    def test_any(self):
        # ANY equals any object, so it works both as a recorded argument
        # and inside assert_called_with, positionally or by keyword.
        self.assertEqual(ANY, object())

        mock = Mock()
        mock(ANY)
        mock.assert_called_with(ANY)

        mock = Mock()
        mock(foo=ANY)
        mock.assert_called_with(foo=ANY)

    def test_repr(self):
        self.assertEqual(repr(ANY), '<ANY>')
        self.assertEqual(str(ANY), '<ANY>')

    def test_any_and_datetime(self):
        # ANY stands in for values that cannot be predicted at assert time.
        mock = Mock()
        mock(datetime.now(), foo=datetime.now())
        mock.assert_called_with(ANY, foo=ANY)

    def test_any_mock_calls_comparison_order(self):
        """Equality with ANY must hold no matter which operand's __eq__
        is consulted, even against objects that always claim inequality."""
        mock = Mock()
        d = datetime.now()
        class Foo(object):
            # Pathological class: never equal, always not-equal.
            def __eq__(self, other):
                return False
            def __ne__(self, other):
                return True
        for d in datetime.now(), Foo():
            mock.reset_mock()

            mock(d, foo=d, bar=d)
            mock.method(d, zinga=d, alpha=d)
            mock().method(a1=d, z99=d)

            expected = [
                call(ANY, foo=ANY, bar=ANY),
                call.method(ANY, zinga=ANY, alpha=ANY),
                call(), call().method(a1=ANY, z99=ANY)
            ]
            # Compare both directions so ANY's __eq__ wins either way.
            self.assertEqual(expected, mock.mock_calls)
            self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
    """Tests for the _Call tuple subclass and the public `call` helper.

    A _Call is a (name, args, kwargs) tuple with lenient equality:
    missing components are treated as empty, and an anonymous call
    compares equal to a named one.
    """

    def test_call_with_call(self):
        # An empty _Call equals every spelling of "no args, no kwargs",
        # whatever the name component is.
        kall = _Call()
        self.assertEqual(kall, _Call())
        self.assertEqual(kall, _Call(('',)))
        self.assertEqual(kall, _Call(((),)))
        self.assertEqual(kall, _Call(({},)))
        self.assertEqual(kall, _Call(('', ())))
        self.assertEqual(kall, _Call(('', {})))
        self.assertEqual(kall, _Call(('', (), {})))

        # Named empty calls also equal the anonymous empty call.
        self.assertEqual(kall, _Call(('foo',)))
        self.assertEqual(kall, _Call(('bar', ())))
        self.assertEqual(kall, _Call(('baz', {})))
        self.assertEqual(kall, _Call(('spam', (), {})))

        # Positional args must match across all equivalent spellings.
        kall = _Call(((1, 2, 3),))
        self.assertEqual(kall, _Call(((1, 2, 3),)))
        self.assertEqual(kall, _Call(('', (1, 2, 3))))
        self.assertEqual(kall, _Call(((1, 2, 3), {})))
        self.assertEqual(kall, _Call(('', (1, 2, 3), {})))

        # Differing args compare unequal.
        kall = _Call(((1, 2, 4),))
        self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
        self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))

        # A *named* call with args is not equal to an anonymous or
        # differently-named call with the same args.
        kall = _Call(('foo', (1, 2, 4),))
        self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
        self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))

        # Keyword-only calls: a lone dict is interpreted as kwargs.
        kall = _Call(({'a': 3},))
        self.assertEqual(kall, _Call(('', (), {'a': 3})))
        self.assertEqual(kall, _Call(('', {'a': 3})))
        self.assertEqual(kall, _Call(((), {'a': 3})))
        self.assertEqual(kall, _Call(({'a': 3},)))

    def test_empty__Call(self):
        # Empty _Call equals plain tuples in every equivalent form.
        args = _Call()
        self.assertEqual(args, ())
        self.assertEqual(args, ('foo',))
        self.assertEqual(args, ((),))
        self.assertEqual(args, ('foo', ()))
        self.assertEqual(args, ('foo',(), {}))
        self.assertEqual(args, ('foo', {}))
        self.assertEqual(args, ({},))

    def test_named_empty_call(self):
        # A named empty call only equals tuples carrying the same name.
        args = _Call(('foo', (), {}))
        self.assertEqual(args, ('foo',))
        self.assertEqual(args, ('foo', ()))
        self.assertEqual(args, ('foo',(), {}))
        self.assertEqual(args, ('foo', {}))

        self.assertNotEqual(args, ((),))
        self.assertNotEqual(args, ())
        self.assertNotEqual(args, ({},))
        self.assertNotEqual(args, ('bar',))
        self.assertNotEqual(args, ('bar', ()))
        self.assertNotEqual(args, ('bar', {}))

    def test_call_with_args(self):
        # Anonymous call with args equals named variants too (the
        # anonymous side is lenient about the name).
        args = _Call(((1, 2, 3), {}))
        self.assertEqual(args, ((1, 2, 3),))
        self.assertEqual(args, ('foo', (1, 2, 3)))
        self.assertEqual(args, ('foo', (1, 2, 3), {}))
        self.assertEqual(args, ((1, 2, 3), {}))

    def test_named_call_with_args(self):
        # ...but a *named* call does not equal anonymous tuples.
        args = _Call(('foo', (1, 2, 3), {}))
        self.assertEqual(args, ('foo', (1, 2, 3)))
        self.assertEqual(args, ('foo', (1, 2, 3), {}))

        self.assertNotEqual(args, ((1, 2, 3),))
        self.assertNotEqual(args, ((1, 2, 3), {}))

    def test_call_with_kwargs(self):
        # Same leniency rules for keyword-argument-only calls.
        args = _Call(((), dict(a=3, b=4)))
        self.assertEqual(args, (dict(a=3, b=4),))
        self.assertEqual(args, ('foo', dict(a=3, b=4)))
        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
        self.assertEqual(args, ((), dict(a=3, b=4)))

    def test_named_call_with_kwargs(self):
        args = _Call(('foo', (), dict(a=3, b=4)))
        self.assertEqual(args, ('foo', dict(a=3, b=4)))
        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))

        self.assertNotEqual(args, (dict(a=3, b=4),))
        self.assertNotEqual(args, ((), dict(a=3, b=4)))

    def test_call_with_args_call_empty_name(self):
        # _Call built from raw tuples equals the `call(...)` helper form,
        # in both comparison directions and via list containment.
        args = _Call(((1, 2, 3), {}))
        self.assertEqual(args, call(1, 2, 3))
        self.assertEqual(call(1, 2, 3), args)
        self.assertTrue(call(1, 2, 3) in [args])

    def test_call_ne(self):
        # __ne__ must be the strict inverse of __eq__.
        self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
        self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
        self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))

    def test_call_non_tuples(self):
        # Comparison against non-tuples must not raise; it is just unequal.
        kall = _Call(((1, 2, 3),))
        for value in 1, None, self, int:
            self.assertNotEqual(kall, value)
            self.assertFalse(kall == value)

    def test_repr(self):
        self.assertEqual(repr(_Call()), 'call()')
        self.assertEqual(repr(_Call(('foo',))), 'call.foo()')

        self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
                         "call(1, 2, 3, a='b')")
        self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
                         "call.bar(1, 2, 3, a='b')")

        self.assertEqual(repr(call), 'call')
        self.assertEqual(str(call), 'call')

        self.assertEqual(repr(call()), 'call()')
        self.assertEqual(repr(call(1)), 'call(1)')
        self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")

        # Note: the repr of a chained call shows only the *final* call's
        # arguments; intermediate args are elided.
        self.assertEqual(repr(call().foo), 'call().foo')
        self.assertEqual(repr(call(1).foo.bar(a=3).bing),
                         'call().foo.bar().bing')
        self.assertEqual(
            repr(call().foo(1, 2, a=3)),
            "call().foo(1, 2, a=3)"
        )
        self.assertEqual(repr(call()()), "call()()")
        self.assertEqual(repr(call(1)(2)), "call()(2)")
        self.assertEqual(
            repr(call()().bar().baz.beep(1)),
            "call()().bar().baz.beep(1)"
        )

    def test_call(self):
        # `call(...)` produces the ('', args, kwargs) anonymous form.
        self.assertEqual(call(), ('', (), {}))
        self.assertEqual(call('foo', 'bar', one=3, two=4),
                         ('', ('foo', 'bar'), {'one': 3, 'two': 4}))

        mock = Mock()
        mock(1, 2, 3)
        mock(a=3, b=6)
        self.assertEqual(mock.call_args_list,
                         [call(1, 2, 3), call(a=3, b=6)])

    def test_attribute_call(self):
        # Attribute access on `call` builds dotted method-call names.
        self.assertEqual(call.foo(1), ('foo', (1,), {}))
        self.assertEqual(call.bar.baz(fish='eggs'),
                         ('bar.baz', (), {'fish': 'eggs'}))

        mock = Mock()
        mock.foo(1, 2 ,3)
        mock.bar.baz(a=3, b=6)
        self.assertEqual(mock.method_calls,
                         [call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])

    def test_extended_call(self):
        # Chained calls record the full '().name().name' path, keeping
        # only the final call's arguments.
        result = call(1).foo(2).bar(3, a=4)
        self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))

        mock = MagicMock()
        mock(1, 2, a=3, b=4)
        self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
        self.assertNotEqual(mock.call_args, call(1, 2, 3))

        self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
        self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])

        mock = MagicMock()
        mock.foo(1).bar()().baz.beep(a=6)

        last_call = call.foo(1).bar()().baz.beep(a=6)
        self.assertEqual(mock.mock_calls[-1], last_call)
        self.assertEqual(mock.mock_calls, last_call.call_list())

    def test_call_list(self):
        # call_list() expands a chained call into the sequence of
        # individual calls needed to produce it.
        mock = MagicMock()
        mock(1)
        self.assertEqual(call(1).call_list(), mock.mock_calls)

        mock = MagicMock()
        mock(1).method(2)
        self.assertEqual(call(1).method(2).call_list(),
                         mock.mock_calls)

        mock = MagicMock()
        mock(1).method(2)(3)
        self.assertEqual(call(1).method(2)(3).call_list(),
                         mock.mock_calls)

        mock = MagicMock()
        int(mock(1).method(2)(3).foo.bar.baz(4)(5))
        kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
        self.assertEqual(kall.call_list(), mock.mock_calls)

    def test_call_any(self):
        # `call` itself (and recorded calls) compare equal to ANY.
        self.assertEqual(call, ANY)

        m = MagicMock()
        int(m)
        self.assertEqual(m.mock_calls, [ANY])
        self.assertEqual([ANY], m.mock_calls)

    def test_two_args_call(self):
        # two=True builds the short (args, kwargs) form, which still
        # compares equal to the three-element form.
        args = _Call(((1, 2), {'a': 3}), two=True)
        self.assertEqual(len(args), 2)
        self.assertEqual(args[0], (1, 2))
        self.assertEqual(args[1], {'a': 3})

        other_args = _Call(((1, 2), {'a': 3}))
        self.assertEqual(args, other_args)
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
for spec in (SomeClass, SomeClass()):
mock = create_autospec(spec)
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different we treat a list instance as the type.
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self):
pass
class Bar(object):
def f(self):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a()
this_mock.a.assert_called_with()
self.assertRaises(TypeError, this_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a()
instance_mock.a.assert_called_with()
self.assertRaises(TypeError, instance_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
# The return value isn't isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f()
instance_mock.Bar.f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f()
instance_mock.Bar().f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock(4, 5)
mock.assert_called_with(4, 5)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
instance = mock()
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
mock = create_autospec(Callable())
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(self=10)
a.f.assert_called_with(self=10)
def test_autospec_property(self):
class Foo(object):
@property
def foo(self):
return 3
foo = create_autospec(Foo)
mock_property = foo.foo
# no spec on properties
self.assertTrue(isinstance(mock_property, MagicMock))
mock_property(1, 2, 3)
mock_property.abc(4, 5, 6)
mock_property.assert_called_once_with(1, 2, 3)
mock_property.abc.assert_called_once_with(4, 5, 6)
def test_autospec_slots(self):
class Foo(object):
__slots__ = ['a']
foo = create_autospec(Foo)
mock_slot = foo.a
# no spec on slots
mock_slot(1, 2, 3)
mock_slot.abc(4, 5, 6)
mock_slot.assert_called_once_with(1, 2, 3)
mock_slot.abc.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
    """Tests for _CallList containment/str, plus PropertyMock basics."""

    def test_args_list_contains_call_list(self):
        mock = Mock()
        self.assertIsInstance(mock.call_args_list, _CallList)

        mock(1, 2)
        mock(a=3)
        mock(3, 4)
        mock(b=6)

        # Single calls and contiguous sub-sequences of calls are both
        # "in" a _CallList.
        for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
            self.assertTrue(kall in mock.call_args_list)

        calls = [call(a=3), call(3, 4)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(1, 2), call(a=3)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(3, 4), call(b=6)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(3, 4)]
        self.assertTrue(calls in mock.call_args_list)

        self.assertFalse(call('fish') in mock.call_args_list)
        self.assertFalse([call('fish')] in mock.call_args_list)

    def test_call_list_str(self):
        # _CallList renders one call per line for readable failure output.
        mock = Mock()
        mock(1, 2)
        mock.foo(a=3)
        mock.foo.bar().baz('fish', cat='dog')

        expected = (
            "[call(1, 2),\n"
            " call.foo(a=3),\n"
            " call.foo.bar(),\n"
            " call.foo.bar().baz('fish', cat='dog')]"
        )
        self.assertEqual(str(mock.mock_calls), expected)

    def test_propertymock(self):
        # Patching SomeClass.one with a PropertyMock records attribute
        # reads as calls and attribute writes as calls with the value.
        p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
        mock = p.start()
        try:
            SomeClass.one
            mock.assert_called_once_with()

            s = SomeClass()
            s.one
            mock.assert_called_with()
            self.assertEqual(mock.mock_calls, [call(), call()])

            s.one = 3
            self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
        finally:
            p.stop()

    def test_propertymock_returnvalue(self):
        # Fetching a PropertyMock-backed attribute returns a plain
        # MagicMock, not another PropertyMock.
        m = MagicMock()
        p = PropertyMock()
        type(m).foo = p

        returned = m.foo
        p.assert_called_once_with()
        self.assertIsInstance(returned, MagicMock)
        self.assertNotIsInstance(returned, PropertyMock)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
StratusLab/client | api/code/src/main/python/stratuslab/Compressor.py | 1 | 4395 | #
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Util
import hashlib
import gzip
import bz2
from Exceptions import ExecutionException
class Compressor(object):
    """Helpers for inflating, opening and checksumming (optionally
    compressed) files.

    Gzip ('gz') and bzip2 ('bz2') are recognised by file-name suffix.

    Fixes relative to the original:
    * ``_checksum_f`` built its digesters with ``map`` — a one-shot
      iterator on Python 3, so only the first chunk was ever hashed and
      the digest list came out empty; it is now materialised as a list
      (identical behaviour on Python 2).
    * The chunk-reading sentinel is ``b''`` to match the bytes returned
      by the ``'rb'`` stream; the old ``''`` never matched on Python 3,
      causing an infinite loop (``b'' == ''`` on Python 2, so no change
      there).
    * The empty-``checksums`` path now returns ``(0, {})`` for a
      consistent ``(size, digests)`` contract; it previously returned a
      bare ``{}`` while every other path returned a tuple.
    * Mutable default arguments replaced with ``None``.
    """

    # File-name suffixes (lower-cased) recognised as compressed formats.
    compressionFormats = ('gz', 'bz2')

    @staticmethod
    def compress(filename):
        # Not implemented: compression is currently a no-op placeholder.
        pass

    @staticmethod
    def inflate(filename):
        """Decompress `filename` in place using the external tool
        matching its suffix.

        Raises ExecutionException if the command exits non-zero.
        """
        cmd = Compressor._getInflateCommand(filename)
        ret = Util.execute([cmd, filename])
        if ret != 0:
            raise ExecutionException('Error inflating file: %s' % filename)

    @staticmethod
    def getCompressionFormat(filename):
        """If the file suffix (ignoring case) is in the list of supported
        compression formats, this returns the compression format.
        Otherwise, it returns the empty string. """
        suffix = Util.fileGetExtension(filename).lower()
        if suffix in Compressor.compressionFormats:
            return suffix
        return ''

    @staticmethod
    def openCompressedFile(filename, options='rb'):
        """Returns an open file handle for the given filename.  If the
        filename ends with a gzip or bzip2 suffix, the file is opened
        through the matching decompression filter; otherwise it is
        opened directly."""
        fmt = Compressor.getCompressionFormat(filename)
        if fmt == 'gz':
            return gzip.open(filename, options)
        elif fmt == 'bz2':
            return bz2.BZ2File(filename, options)
        else:
            return open(filename, options)

    @staticmethod
    def checksum_file(filename, checksums=None, chunk_size=1024 * 1024 * 10):
        """Return a (size-in-bytes, {algorithm: hexdigest}) tuple for the
        (decompressed) contents of `filename`.

        `checksums` is a list of hashlib algorithm names; with no
        checksums requested the file is not read and (0, {}) is returned.
        """
        return Compressor._checksum_f(
            Compressor.openCompressedFile(filename, 'rb'), checksums, chunk_size)

    @staticmethod
    def _checksum_f(f, checksums=None, chunk_size=1024 * 1024 * 10):
        """Returns a tuple with the stream size in bytes and a dictionary
        of checksums.  The stream is read fully only if checksums are
        requested.  This method always closes the file handle."""
        #
        # "with" cannot be used here because the gzip library in python
        # 2.6 doesn't support the __exit__ attribute needed for it
        #
        try:
            if not checksums:
                # Consistent (size, digests) contract even when nothing
                # was requested (originally returned a bare {}).
                return (0, {})

            try:
                # list() so the digesters survive multiple chunk
                # iterations (map is a one-shot iterator on Python 3).
                digesters = list(map(hashlib.new, checksums))
            except ValueError as e:
                raise ExecutionException('%s' % e)

            size = 0
            # b'' sentinel: the stream is opened in binary mode.
            for chunk in iter(lambda: f.read(chunk_size), b''):
                size += len(chunk)
                for digester in digesters:
                    digester.update(chunk)

            digests = [d.hexdigest() for d in digesters]
            return (size, dict(zip(checksums, digests)))
        finally:
            f.close()

    @staticmethod
    def _getCompressionCommand(filename):
        # Format is taken from the final file-name suffix.
        fmt = filename.split('.')[-1]
        return Compressor._getCompressionCommandByFormat(fmt)

    @staticmethod
    def _getCompressionCommandByFormat(fmt):
        """Map a format suffix to the external compression command."""
        if fmt == 'gz':
            cmd = 'gzip'
        elif fmt == 'bz2':
            cmd = 'bzip2'
        else:
            raise NotImplementedError('Unknown compression format: %s' % fmt)
        return cmd

    @staticmethod
    def _getInflateCommand(filename):
        """Map a file name's suffix to the external decompression command."""
        fmt = filename.split('.')[-1]
        if fmt == 'gz':
            cmd = 'gunzip'
        elif fmt == 'bz2':
            cmd = 'bunzip2'
        else:
            raise NotImplementedError('Unknown compression format: %s' % fmt)
        return cmd
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/compiler/tests/stack_ops_test.py | 10 | 4361 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.platform import test
class StackOpTest(XLATestCase):
  """Tests XLA compilation of the v2 stack ops (stack_v2, stack_push_v2,
  stack_pop_v2, stack_close_v2)."""

  def testStackPushPop(self):
    # Push one value, then pop it back; control_dependencies forces the
    # push to run before the pop in the same graph.
    with self.test_session(), self.test_scope():
      size = array_ops.placeholder(dtypes.int32)
      v = array_ops.placeholder(dtypes.float32)
      h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops.stack_push_v2(h, v)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
      self.assertAllClose([[4.0, 5.0]], c1.eval({size: 5, v: [[4.0, 5.0]]}))

  def testStackPushPopSwap(self):
    # Same round trip with swap_memory=True and a larger tensor.
    with self.test_session(), self.test_scope():
      a = np.arange(2000)
      x = array_ops.placeholder(dtypes.float32)
      h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
      self.assertAllClose(a, c1.eval({x: a}))

  def testMultiStack(self):
    # Two independent stacks in one graph must not interfere.
    with self.test_session(), self.test_scope():
      v = array_ops.placeholder(dtypes.float32)
      h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops.stack_push_v2(h1, v)
      with ops.control_dependencies([c1]):
        c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
      h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="bar")
      c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
      with ops.control_dependencies([c2]):
        c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
      r = c1 + c2
      self.assertAllClose(9.0, r.eval({v: 4.0}))

  def testSameNameStacks(self):
    """Different stacks with the same name do not interfere."""
    with self.test_session() as sess, self.test_scope():
      v1 = array_ops.placeholder(dtypes.float32)
      v2 = array_ops.placeholder(dtypes.float32)
      h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")

      c1 = gen_data_flow_ops.stack_push_v2(h1, v1)
      with ops.control_dependencies([c1]):
        c2 = gen_data_flow_ops.stack_push_v2(h2, v2)
      with ops.control_dependencies([c2]):
        pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
        pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)

      out1, out2 = sess.run([pop1, pop2], {v1: 4.0, v2: 5.0})
      self.assertAllClose(out1, 4.0)
      self.assertAllClose(out2, 5.0)

  def testCloseStack(self):
    # Closing an empty stack must succeed.
    with self.test_session() as sess, self.test_scope():
      size = array_ops.placeholder(dtypes.int32)
      h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
      c1 = gen_data_flow_ops.stack_close_v2(h)
      sess.run(c1, {size: 5})

  def testPushCloseStack(self):
    # Closing a stack that still holds a value must also succeed.
    with self.test_session() as sess, self.test_scope():
      v = array_ops.placeholder(dtypes.float32)
      h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      c = gen_data_flow_ops.stack_push_v2(h, v)
      with ops.control_dependencies([c]):
        c1 = gen_data_flow_ops.stack_close_v2(h)
      sess.run(c1, {v: [[4.0, 5.0]]})
# Allow running this test module directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
muxi/grpc | src/python/grpcio_tests/tests/testing/_server_test.py | 13 | 9167 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import grpc
import grpc_testing
from tests.testing import _application_common
from tests.testing import _application_testing_common
from tests.testing import _server_application
from tests.testing.proto import services_pb2
class FirstServiceServicerTest(unittest.TestCase):
def setUp(self):
self._real_time = grpc_testing.strict_real_time()
self._fake_time = grpc_testing.strict_fake_time(time.time())
servicer = _server_application.FirstServiceServicer()
descriptors_to_servicers = {
_application_testing_common.FIRST_SERVICE: servicer
}
self._real_time_server = grpc_testing.server_from_dictionary(
descriptors_to_servicers, self._real_time)
self._fake_time_server = grpc_testing.server_from_dictionary(
descriptors_to_servicers, self._fake_time)
def test_successful_unary_unary(self):
rpc = self._real_time_server.invoke_unary_unary(
_application_testing_common.FIRST_SERVICE_UNUN, (),
_application_common.UNARY_UNARY_REQUEST, None)
initial_metadata = rpc.initial_metadata()
response, trailing_metadata, code, details = rpc.termination()
self.assertEqual(_application_common.UNARY_UNARY_RESPONSE, response)
self.assertIs(code, grpc.StatusCode.OK)
def test_successful_unary_stream(self):
rpc = self._real_time_server.invoke_unary_stream(
_application_testing_common.FIRST_SERVICE_UNSTRE, (),
_application_common.UNARY_STREAM_REQUEST, None)
initial_metadata = rpc.initial_metadata()
trailing_metadata, code, details = rpc.termination()
self.assertIs(code, grpc.StatusCode.OK)
def test_successful_stream_unary(self):
rpc = self._real_time_server.invoke_stream_unary(
_application_testing_common.FIRST_SERVICE_STREUN, (), None)
rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
rpc.requests_closed()
initial_metadata = rpc.initial_metadata()
response, trailing_metadata, code, details = rpc.termination()
self.assertEqual(_application_common.STREAM_UNARY_RESPONSE, response)
self.assertIs(code, grpc.StatusCode.OK)
def test_successful_stream_stream(self):
rpc = self._real_time_server.invoke_stream_stream(
_application_testing_common.FIRST_SERVICE_STRESTRE, (), None)
rpc.send_request(_application_common.STREAM_STREAM_REQUEST)
initial_metadata = rpc.initial_metadata()
responses = [
rpc.take_response(),
rpc.take_response(),
]
rpc.send_request(_application_common.STREAM_STREAM_REQUEST)
rpc.send_request(_application_common.STREAM_STREAM_REQUEST)
responses.extend([
rpc.take_response(),
rpc.take_response(),
rpc.take_response(),
rpc.take_response(),
])
rpc.requests_closed()
trailing_metadata, code, details = rpc.termination()
for response in responses:
self.assertEqual(_application_common.STREAM_STREAM_RESPONSE,
response)
self.assertIs(code, grpc.StatusCode.OK)
def test_mutating_stream_stream(self):
rpc = self._real_time_server.invoke_stream_stream(
_application_testing_common.FIRST_SERVICE_STRESTRE, (), None)
rpc.send_request(_application_common.STREAM_STREAM_MUTATING_REQUEST)
initial_metadata = rpc.initial_metadata()
responses = [
rpc.take_response()
for _ in range(_application_common.STREAM_STREAM_MUTATING_COUNT)
]
rpc.send_request(_application_common.STREAM_STREAM_MUTATING_REQUEST)
responses.extend([
rpc.take_response()
for _ in range(_application_common.STREAM_STREAM_MUTATING_COUNT)
])
rpc.requests_closed()
_, _, _ = rpc.termination()
expected_responses = (
services_pb2.Bottom(first_bottom_field=0),
services_pb2.Bottom(first_bottom_field=1),
services_pb2.Bottom(first_bottom_field=0),
services_pb2.Bottom(first_bottom_field=1),
)
self.assertSequenceEqual(expected_responses, responses)
def test_server_rpc_idempotence(self):
    """initial_metadata() and termination() may be called repeatedly and
    must return equal values every time."""
    rpc = self._real_time_server.invoke_unary_unary(
        _application_testing_common.FIRST_SERVICE_UNUN, (),
        _application_common.UNARY_UNARY_REQUEST, None)
    first_initial_metadata = rpc.initial_metadata()
    second_initial_metadata = rpc.initial_metadata()
    third_initial_metadata = rpc.initial_metadata()
    first_termination = rpc.termination()
    second_termination = rpc.termination()
    third_termination = rpc.termination()
    for later_initial_metadata in (
            second_initial_metadata,
            third_initial_metadata,
    ):
        self.assertEqual(first_initial_metadata, later_initial_metadata)
    # Unpack the first termination tuple for element-wise comparison below.
    response = first_termination[0]
    terminal_metadata = first_termination[1]
    code = first_termination[2]
    details = first_termination[3]
    for later_termination in (
            second_termination,
            third_termination,
    ):
        self.assertEqual(response, later_termination[0])
        self.assertEqual(terminal_metadata, later_termination[1])
        self.assertIs(code, later_termination[2])
        self.assertEqual(details, later_termination[3])
    self.assertEqual(_application_common.UNARY_UNARY_RESPONSE, response)
    self.assertIs(code, grpc.StatusCode.OK)
def test_misbehaving_client_unary_unary(self):
    """A request the service treats as erroneous must terminate non-OK."""
    rpc = self._real_time_server.invoke_unary_unary(
        _application_testing_common.FIRST_SERVICE_UNUN, (),
        _application_common.ERRONEOUS_UNARY_UNARY_REQUEST, None)
    initial_metadata = rpc.initial_metadata()
    response, trailing_metadata, code, details = rpc.termination()
    self.assertIsNot(code, grpc.StatusCode.OK)
def test_infinite_request_stream_real_time(self):
    """Sleeping (real time) past the call timeout while the request stream
    is still open must terminate the RPC with DEADLINE_EXCEEDED."""
    rpc = self._real_time_server.invoke_stream_unary(
        _application_testing_common.FIRST_SERVICE_STREUN, (),
        _application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    initial_metadata = rpc.initial_metadata()
    self._real_time.sleep_for(
        _application_common.INFINITE_REQUEST_STREAM_TIMEOUT * 2)
    # This request is sent after the deadline has already passed.
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    response, trailing_metadata, code, details = rpc.termination()
    self.assertIs(code, grpc.StatusCode.DEADLINE_EXCEEDED)
def test_infinite_request_stream_fake_time(self):
    """Same as the real-time variant, but advancing a fake clock past the
    timeout; the RPC must still end with DEADLINE_EXCEEDED."""
    rpc = self._fake_time_server.invoke_stream_unary(
        _application_testing_common.FIRST_SERVICE_STREUN, (),
        _application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    initial_metadata = rpc.initial_metadata()
    self._fake_time.sleep_for(
        _application_common.INFINITE_REQUEST_STREAM_TIMEOUT * 2)
    # This request is sent after the (fake) deadline has already passed.
    rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
    response, trailing_metadata, code, details = rpc.termination()
    self.assertIs(code, grpc.StatusCode.DEADLINE_EXCEEDED)
def test_servicer_context_abort(self):
    """An abort request terminates with PERMISSION_DENIED; a follow-up
    query then confirms the server survived the abort."""
    rpc = self._real_time_server.invoke_unary_unary(
        _application_testing_common.FIRST_SERVICE_UNUN, (),
        _application_common.ABORT_REQUEST, None)
    _, _, code, _ = rpc.termination()
    self.assertIs(code, grpc.StatusCode.PERMISSION_DENIED)
    rpc = self._real_time_server.invoke_unary_unary(
        _application_testing_common.FIRST_SERVICE_UNUN, (),
        _application_common.ABORT_SUCCESS_QUERY, None)
    response, _, code, _ = rpc.termination()
    self.assertEqual(_application_common.ABORT_SUCCESS_RESPONSE, response)
    self.assertIs(code, grpc.StatusCode.OK)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| apache-2.0 |
jjingrong/PONUS-1.2 | venv/build/django/django/utils/unittest/loader.py | 110 | 13445 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
# Matches filenames that are a valid Python identifier followed by '.py'.
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    """Build a suite containing one test that re-raises the import failure
    for module *name*."""
    message = 'Failed to import test module: %s' % name
    formatter = getattr(traceback, 'format_exc', None)
    if formatter is not None:
        # Python 2.3 compatibility: format_exc may be absent; when present
        # it includes two frames of discover.py as well.
        message += '\n%s' % formatter()
    return _make_failed_test(
        'ModuleImportFailure', name, ImportError(message), suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    # Wrap a load_tests failure in a one-test suite that re-raises *exception*.
    return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Synthesize a TestCase subclass whose single test method re-raises
    *exception*, and return it wrapped in a suite."""
    def _raiser(self):
        raise exception
    failure_class = type(classname, (case.TestCase,), {methodname: _raiser})
    return suiteClass((failure_class(methodname),))
class TestLoader(unittest.TestLoader):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    testMethodPrefix = 'test'
    # Python 2 builtin ``cmp``; turned into a key function via _CmpToKey.
    sortTestMethodsUsing = cmp
    suiteClass = suite.TestSuite
    # Remembered across discover() calls so load_tests can re-enter discovery
    # without passing top_level_dir again.
    _top_level_dir = None

    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite."
                            " Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            # No test_* methods, but the legacy runTest hook is defined.
            testCaseNames = ['runTest']
        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
        return loaded_suite

    def loadTestsFromModule(self, module, use_load_tests=True):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        if use_load_tests and load_tests is not None:
            try:
                return load_tests(self, tests, None)
            except Exception as e:
                # Surface the failure as a synthetic failing test instead of
                # aborting the whole load.
                return _make_failed_load_tests(module.__name__, e,
                                               self.suiteClass)
        return tests

    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.

        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.

        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            parts_copy = parts[:]
            # Import the longest importable prefix of the dotted name.
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        # Walk the remaining dotted parts down from the imported module.
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.UnboundMethodType) and
              isinstance(parent, type) and
              issubclass(parent, unittest.TestCase)):
            # A single test method: instantiate its TestCase around it.
            return self.suiteClass([parent(obj.__name__)])
        elif isinstance(obj, unittest.TestSuite):
            return obj
        elif hasattr(obj, '__call__'):
            test = obj()
            if isinstance(test, unittest.TestSuite):
                return test
            elif isinstance(test, unittest.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)

    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)

    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         prefix=self.testMethodPrefix):
            # A test method starts with the prefix and is callable.
            return attrname.startswith(prefix) and \
                hasattr(getattr(testCaseClass, attrname), '__call__')
        # Python 2 filter() returns a list; the in-place sort below relies on it.
        testFnNames = filter(isTestMethod, dir(testCaseClass))
        if self.sortTestMethodsUsing:
            testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
        return testFnNames

    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them. Only test files
        that match the pattern will be loaded. (Using shell style pattern
        matching.)

        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.

        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.

        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.

        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            if start_dir != top_level_dir:
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    # Derive the top level from the package location and undo
                    # the speculative sys.path insertion made above.
                    self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)

    def _get_name_from_path(self, path):
        # Convert a filesystem path into a dotted module name relative to
        # the stored top level directory.
        path = os.path.splitext(os.path.normpath(path))[0]
        _relpath = relpath(path, self._top_level_dir)
        assert not os.path.isabs(_relpath), "Path must be within the project"
        assert not _relpath.startswith('..'), "Path must be within the project"
        name = _relpath.replace(os.path.sep, '.')
        return name

    def _get_module_from_name(self, name):
        # Import by dotted name and return the leaf module object.
        __import__(name)
        return sys.modules[name]

    def _match_path(self, path, full_path, pattern):
        # override this method to use alternative matching strategy
        return fnmatch(path, pattern)

    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads."""
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    # Import errors become a synthetic failing test so the
                    # rest of discovery can continue.
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = os.path.splitext(mod_file)[0]
                    fullpath_noext = os.path.splitext(full_path)[0]
                    if realpath.lower() != fullpath_noext.lower():
                        # The name resolved to a different file than the one
                        # found on disk -- likely a globally installed copy
                        # shadowing the local module.
                        module_dir = os.path.dirname(realpath)
                        mod_name = os.path.splitext(os.path.basename(full_path))[0]
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    # Not a package; do not recurse into it.
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception as e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
# Shared default TestLoader instance.
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
    """Build a TestLoader configured with *prefix*, *sortUsing* and an
    optional suite class."""
    configured = TestLoader()
    configured.testMethodPrefix = prefix
    configured.sortTestMethodsUsing = sortUsing
    if suiteClass:
        configured.suiteClass = suiteClass
    return configured
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
    """Module-level convenience wrapper for TestLoader.getTestCaseNames."""
    loader = _makeLoader(prefix, sortUsing)
    return loader.getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
              suiteClass=suite.TestSuite):
    """Module-level convenience wrapper building a suite from one TestCase."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
                  suiteClass=suite.TestSuite):
    """Module-level convenience wrapper loading every TestCase in *module*."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromModule(module)
| mit |
rolandgeider/wger | wger/__init__.py | 1 | 1028 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
# Version tuple: (major, minor, micro, release level, serial).
VERSION = (2, 0, 0, 'alpha', 2)
# False marks a development build; get_version() then appends '.dev0'.
RELEASE = False
def get_version(version=None, release=None):
    """Derives a PEP386-compliant version number from VERSION.

    Falls back to the module-level VERSION and RELEASE when the
    corresponding argument is omitted.
    """
    if version is None:
        version = VERSION
    if release is None:
        release = RELEASE
    assert len(version) == 5
    assert version[3] in ('alpha', 'beta', 'rc', 'final')

    # main = X.Y, or X.Y.Z when the micro component is non-zero.
    significant = 2 if version[2] == 0 else 3
    main = '.'.join(str(part) for part in version[:significant])

    # sub = '' for final releases, or {a|b|rc}N for pre-releases.
    if version[3] == 'final':
        sub = ''
    else:
        sub = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}[version[3]] + str(version[4])

    # Development snapshots additionally carry a .dev0 suffix.
    if not release:
        sub += '.dev0'
    return main + sub
| agpl-3.0 |
technige/py2neo | test/integration/test_merge.py | 1 | 13457 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytest import raises
from py2neo import Node, Relationship
from py2neo.data import UniquenessError
def test_can_merge_node_that_does_not_exist(graph, make_unique_id):
    """Merging an unmatched node creates it and binds it to the graph."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    old_order = len(graph.nodes)
    graph.merge(alice, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert graph.exists(alice)
    new_order = len(graph.nodes)
    assert new_order == old_order + 1
def test_can_merge_node_that_does_exist(graph, make_unique_id):
    """Merging onto a matching stored node binds to it without creating."""
    label = make_unique_id()
    graph.create(Node(label, name="Alice"))
    alice = Node(label, name="Alice")
    old_order = len(graph.nodes)
    graph.merge(alice, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert graph.exists(alice)
    new_order = len(graph.nodes)
    assert new_order == old_order
def test_cannot_merge_node_where_two_exist(graph, make_unique_id):
    """An ambiguous merge (two stored candidates) raises UniquenessError."""
    label = make_unique_id()
    graph.create(Node(label, name="Alice"))
    graph.create(Node(label, name="Alice"))
    alice = Node(label, name="Alice")
    with raises(UniquenessError):
        graph.merge(alice, label, "name")
def test_can_merge_bound_node(graph, make_unique_id):
    """Merging a node that is already bound leaves the node count unchanged."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    graph.create(alice)
    old_order = len(graph.nodes)
    graph.merge(alice, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert graph.exists(alice)
    new_order = len(graph.nodes)
    assert new_order == old_order
def test_can_merge_node_that_does_not_exist_on_specific_label_and_key(graph, make_unique_id):
    """With no match on the merge label/key, the node is created with all
    of its labels and properties."""
    label = make_unique_id()
    label_2 = make_unique_id()
    alice = Node(label, label_2, name="Alice", age=33)
    old_order = len(graph.nodes)
    graph.merge(alice, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert graph.exists(alice)
    new_order = len(graph.nodes)
    assert new_order == old_order + 1
def test_can_merge_node_that_does_exist_on_specific_label_and_key_with_extra_properties(graph, make_unique_id):
    """Extra labels and properties on the local node do not prevent a match
    on the merge label/key."""
    label = make_unique_id()
    label_2 = make_unique_id()
    graph.create(Node(label, name="Alice"))
    alice = Node(label, label_2, name="Alice", age=33)
    old_order = len(graph.nodes)
    graph.merge(alice, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert graph.exists(alice)
    new_order = len(graph.nodes)
    assert new_order == old_order
def test_can_merge_node_that_does_exist_on_specific_label_and_key_with_other_properties(graph, make_unique_id):
    """Differing non-key property values still match on the merge label/key."""
    label = make_unique_id()
    label_2 = make_unique_id()
    graph.create(Node(label, name="Alice", age=44))
    alice = Node(label, label_2, name="Alice", age=33)
    old_order = len(graph.nodes)
    graph.merge(alice, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert graph.exists(alice)
    new_order = len(graph.nodes)
    assert new_order == old_order
def test_can_merge_relationship_that_does_not_exist(graph, make_unique_id):
    """Merging an unmatched relationship creates both end nodes and the edge."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    bob = Node(label, name="Bob")
    ab = Relationship(alice, "KNOWS", bob)
    old_order = len(graph.nodes)
    old_size = len(graph.relationships)
    graph.merge(ab, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert bob.graph is graph
    assert bob.identity is not None
    assert ab.graph is graph
    assert ab.identity is not None
    assert graph.exists(alice | bob | ab)
    new_order = len(graph.nodes)
    new_size = len(graph.relationships)
    assert new_order == old_order + 2
    assert new_size == old_size + 1
def test_can_merge_relationship_where_one_node_exists(graph, make_unique_id):
    """Merging reuses the pre-existing start node and creates the rest."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    graph.create(alice)
    bob = Node(label, name="Bob")
    ab = Relationship(alice, "KNOWS", bob)
    old_order = len(graph.nodes)
    old_size = len(graph.relationships)
    graph.merge(ab, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert bob.graph is graph
    assert bob.identity is not None
    assert ab.graph is graph
    assert ab.identity is not None
    assert graph.exists(alice | bob | ab)
    new_order = len(graph.nodes)
    new_size = len(graph.relationships)
    assert new_order == old_order + 1
    assert new_size == old_size + 1
def test_can_merge_relationship_where_all_exist(graph, make_unique_id):
    """Merging is a complete no-op when nodes and relationship already exist."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    graph.create(Relationship(alice, "KNOWS", Node(label, name="Bob")))
    bob = Node(label, name="Bob")
    ab = Relationship(alice, "KNOWS", bob)
    old_order = len(graph.nodes)
    old_size = len(graph.relationships)
    graph.merge(ab, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert bob.graph is graph
    assert bob.identity is not None
    assert ab.graph is graph
    assert ab.identity is not None
    assert graph.exists(alice | bob | ab)
    new_order = len(graph.nodes)
    new_size = len(graph.relationships)
    assert new_order == old_order
    assert new_size == old_size
def test_can_merge_relationship_with_space_in_name(graph, make_unique_id):
    """Relationship types containing spaces are merged correctly."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    bob = Node(label, name="Bob")
    ab = Relationship(alice, "MARRIED TO", bob)
    old_order = len(graph.nodes)
    old_size = len(graph.relationships)
    graph.merge(ab, label, "name")
    assert alice.graph is graph
    assert alice.identity is not None
    assert bob.graph is graph
    assert bob.identity is not None
    assert ab.graph is graph
    assert ab.identity is not None
    assert graph.exists(alice | bob | ab)
    new_order = len(graph.nodes)
    new_size = len(graph.relationships)
    assert new_order == old_order + 2
    assert new_size == old_size + 1
def test_cannot_merge_non_subgraph(graph, make_unique_id):
    """merge() rejects arguments that are not Subgraphs with TypeError."""
    with raises(TypeError):
        graph.merge("this string is definitely not a subgraph")
def test_can_merge_three_nodes_where_none_exist(graph, make_unique_id):
    """Merging a three-node union creates all three nodes."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    bob = Node(label, name="Bob")
    carol = Node(label, name="Carol")
    old_order = len(graph.nodes)
    subgraph = alice | bob | carol
    graph.merge(subgraph, label, "name")
    for node in subgraph.nodes:
        assert node.graph is graph
        assert node.identity is not None
        assert graph.exists(node)
    new_order = len(graph.nodes)
    assert new_order == old_order + 3
def test_can_merge_three_nodes_where_one_exists(graph, make_unique_id):
    """Merging a three-node union creates only the two missing nodes."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    bob = Node(label, name="Bob")
    carol = Node(label, name="Carol")
    graph.create(alice)
    old_order = len(graph.nodes)
    subgraph = alice | bob | carol
    graph.merge(subgraph, label, "name")
    for node in subgraph.nodes:
        assert node.graph is graph
        assert node.identity is not None
        assert graph.exists(node)
    new_order = len(graph.nodes)
    assert new_order == old_order + 2
def test_can_merge_three_nodes_where_two_exist(graph, make_unique_id):
    """Merging a three-node union creates only the single missing node."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    bob = Node(label, name="Bob")
    carol = Node(label, name="Carol")
    graph.create(alice | bob)
    old_order = len(graph.nodes)
    subgraph = alice | bob | carol
    graph.merge(subgraph, label, "name")
    for node in subgraph.nodes:
        assert node.graph is graph
        assert node.identity is not None
        assert graph.exists(node)
    new_order = len(graph.nodes)
    assert new_order == old_order + 1
def test_can_merge_three_nodes_where_three_exist(graph, make_unique_id):
    """Merging a fully pre-existing three-node union creates nothing."""
    label = make_unique_id()
    alice = Node(label, name="Alice")
    bob = Node(label, name="Bob")
    carol = Node(label, name="Carol")
    graph.create(alice | bob | carol)
    old_order = len(graph.nodes)
    subgraph = alice | bob | carol
    graph.merge(subgraph, label, "name")
    for node in subgraph.nodes:
        assert node.graph is graph
        assert node.identity is not None
        assert graph.exists(node)
    new_order = len(graph.nodes)
    assert new_order == old_order
def test_can_merge_long_straight_walkable(graph, make_unique_id):
    """Merging a concatenated walkable creates the three missing nodes and
    all three relationships."""
    label = make_unique_id()
    a = Node(label, name="Alice")
    b = Node(label, name="Bob")
    c = Node(label, name="Carol")
    d = Node(label, name="Dave")
    ab = Relationship(a, "KNOWS", b)
    cb = Relationship(c, "KNOWS", b)
    cd = Relationship(c, "KNOWS", d)
    graph.create(a)
    old_order = len(graph.nodes)
    old_size = len(graph.relationships)
    graph.merge(ab + cb + cd, label, "name")
    new_order = len(graph.nodes)
    new_size = len(graph.relationships)
    assert new_order == old_order + 3
    assert new_size == old_size + 3
def test_can_merge_long_walkable_with_repeats(graph, make_unique_id):
    """A relationship repeated within the walkable (cb twice) is merged only
    once: four distinct relationships are created."""
    label = make_unique_id()
    a = Node(label, name="Alice")
    b = Node(label, name="Bob")
    c = Node(label, name="Carol")
    d = Node(label, name="Dave")
    ab = Relationship(a, "KNOWS", b)
    cb = Relationship(c, "KNOWS", b)
    cd = Relationship(c, "KNOWS", d)
    bd = Relationship(b, "KNOWS", d)
    graph.create(a)
    old_order = len(graph.nodes)
    old_size = len(graph.relationships)
    graph.merge(ab + cb + cb + bd + cd, label, "name")
    new_order = len(graph.nodes)
    new_size = len(graph.relationships)
    assert new_order == old_order + 3
    assert new_size == old_size + 4
def test_cannot_merge_without_arguments(graph, make_unique_id):
    """A node with no primary label/key metadata cannot be merged bare."""
    node = Node()
    with raises(ValueError):
        graph.merge(node)
def test_can_merge_with_arguments(graph, make_unique_id):
    """Explicit label/key arguments select which stored node is matched."""
    label_a = make_unique_id()
    label_b = make_unique_id()
    a = Node(label_a, a=1)
    b = Node(label_b, b=2)
    graph.create(a | b)
    a_id = a.identity
    b_id = b.identity
    node = Node(label_a, label_b, a=1, b=2)
    graph.merge(node, label_a, "a")
    assert node.identity == a_id
    assert node.identity != b_id
def test_merge_with_magic_values_overrides_arguments(graph, make_unique_id):
    """A node's __model__ primary label/key wins over explicit arguments."""
    from py2neo.ogm import Model
    label_a = make_unique_id()
    label_b = make_unique_id()
    class ModelB(Model):
        __primarylabel__ = label_b
        __primarykey__ = "b"
    a = Node(label_a, a=1)
    b = Node(label_b, b=2)
    graph.create(a | b)
    a_id = a.identity
    b_id = b.identity
    node = Node(label_a, label_b, a=1, b=2)
    node.__model__ = ModelB
    graph.merge(node, label_a, "a")
    # The model's (label_b, "b") key matched node b, not the argument match.
    assert node.identity != a_id
    assert node.identity == b_id
def test_can_merge_node_with_merge_arguments(graph):
    """Baseline: merge with explicit label and key arguments."""
    graph.delete_all()
    a = Node("Person", name="Alice")
    graph.merge(a, "Person", "name")
    assert graph.nodes.match("Person", name="Alice").first() == a
def test_can_merge_node_with_primary_label_and_key(graph):
    """__primarylabel__/__primarykey__ on the node supply merge arguments."""
    graph.delete_all()
    a = Node("Person", name="Alice")
    a.__primarylabel__ = "Person"
    a.__primarykey__ = "name"
    graph.merge(a)
    assert graph.nodes.match("Person", name="Alice").first() == a
def test_can_merge_node_with_model(graph):
    """A __model__ attribute supplies the primary label and key."""
    from py2neo.ogm import Model
    class Person(Model):
        __primarylabel__ = "Person"
        __primarykey__ = "name"
    graph.delete_all()
    a = Node("Person", name="Alice")
    a.__model__ = Person
    graph.merge(a)
    assert graph.nodes.match("Person", name="Alice").first() == a
def test_can_merge_node_with_model_overriding_arguments(graph):
    """The __model__ label/key override the explicit ('Human', 'nom') args."""
    from py2neo.ogm import Model
    class Person(Model):
        __primarylabel__ = "Person"
        __primarykey__ = "name"
    graph.delete_all()
    a = Node("Person", name="Alice")
    a.__model__ = Person
    graph.merge(a, "Human", "nom")
    assert graph.nodes.match("Person", name="Alice").first() == a
def test_can_merge_node_with_primary_label_overriding_model(graph):
    """A node-level __primarylabel__ takes precedence over the model's."""
    from py2neo.ogm import Model
    class Person(Model):
        __primarylabel__ = "Human"
        __primarykey__ = "name"
    graph.delete_all()
    a = Node("Person", name="Alice")
    a.__model__ = Person
    a.__primarylabel__ = "Person"
    graph.merge(a)
    assert graph.nodes.match("Person", name="Alice").first() == a
def test_can_merge_node_with_primary_key_overriding_model(graph):
    """A node-level __primarykey__ takes precedence over the model's."""
    from py2neo.ogm import Model
    class Person(Model):
        __primarylabel__ = "Person"
        __primarykey__ = "nom"
    graph.delete_all()
    a = Node("Person", name="Alice")
    a.__model__ = Person
    a.__primarykey__ = "name"
    graph.merge(a)
    assert graph.nodes.match("Person", name="Alice").first() == a
| apache-2.0 |
codepantry/django | django/core/management/commands/testserver.py | 239 | 2048 | from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
    help = 'Runs a development server with data from the given fixture(s).'
    # System checks are skipped for this command.
    requires_system_checks = False

    def add_arguments(self, parser):
        parser.add_argument('args', metavar='fixture', nargs='*',
            help='Path(s) to fixtures to load before running the server.')
        parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.')
        parser.add_argument('--addrport', default='',
            help='Port number or ipaddr:port to run the server on.')
        parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use an IPv6 address.')

    def handle(self, *fixture_labels, **options):
        verbosity = options.get('verbosity')
        interactive = options.get('interactive')
        # Create a test database.
        db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive, serialize=False)
        # Import the fixture data into the test database.
        call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})
        # Run the development server. Turn off auto-reloading because it causes
        # a strange error -- it causes this handle() method to be called
        # multiple times.
        shutdown_message = (
            '\nServer stopped.\nNote that the test database, %r, has not been '
            'deleted. You can explore it on your own.' % db_name
        )
        use_threading = connection.features.test_db_allows_multiple_connections
        call_command(
            'runserver',
            addrport=options['addrport'],
            shutdown_message=shutdown_message,
            use_reloader=False,
            use_ipv6=options['use_ipv6'],
            use_threading=use_threading
        )
| bsd-3-clause |
darkleons/odoo | openerp/addons/test_impex/tests/test_import.py | 154 | 30429 | # -*- coding: utf-8 -*-
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def ok(n):
    """Expected import_data result for a successful import.

    :param int n: number of records which should have been imported
    """
    return (n, 0, 0, 0)
def error(row, message, record=None, **kwargs):
    """Expected import_data result for a failed import of ``record`` at
    line ``row``, with the error message ``message``.

    :param str message: error text reported by the importer
    :param dict record: partial record values, merged with ``kwargs``
    """
    merged = dict(record or {})
    merged.update(kwargs)
    return (-1, merged, "Line %d : %s" % (row, message), '')
def values(seq, field='value'):
    """Project the ``field`` entry out of every record dict in ``seq``."""
    return [record[field] for record in seq]
class ImporterCase(common.TransactionCase):
    """Base class for import tests: wraps import/read/browse on the model
    named by ``model_name`` and provides an external-id helper."""
    model_name = False

    def __init__(self, *args, **kwargs):
        super(ImporterCase, self).__init__(*args, **kwargs)
        self.model = None

    def setUp(self):
        super(ImporterCase, self).setUp()
        self.model = self.registry(self.model_name)

    def import_(self, fields, rows, context=None):
        # Run import_data as the superuser and return its result tuple.
        return self.model.import_data(
            self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)

    def read(self, fields=('value',), domain=(), context=None):
        # Read the given fields of every record matching *domain*.
        return self.model.read(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            fields=fields, context=context)

    def browse(self, domain=(), context=None):
        return self.model.browse(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            context=context)

    def xid(self, record):
        # Return the record's external id, creating one from its display
        # name (under the '__test__' module) if none exists yet.
        ModelData = self.registry('ir.model.data')
        ids = ModelData.search(
            self.cr, openerp.SUPERUSER_ID,
            [('model', '=', record._name), ('res_id', '=', record.id)])
        if ids:
            d = ModelData.read(
                self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
            if d['module']:
                return '%s.%s' % (d['module'], d['name'])
            return d['name']
        name = record.name_get()[0][1]
        # fix dotted name_get results, otherwise xid lookups blow up
        name = name.replace('.', '-')
        ModelData.create(self.cr, openerp.SUPERUSER_ID, {
            'name': name,
            'model': record._name,
            'res_id': record.id,
            'module': '__test__'
        })
        return '__test__.' + name
class test_ids_stuff(ImporterCase):
    """Importing via database ids ('.id') and external ids ('id')."""
    model_name = 'export.integer'

    def test_create_with_id(self):
        # A raw database id that does not exist cannot be used for creation.
        self.assertEqual(
            self.import_(['.id', 'value'], [['42', '36']]),
            error(1, u"Unknown database identifier '42'"))

    def test_create_with_xid(self):
        self.assertEqual(
            self.import_(['id', 'value'], [['somexmlid', '42']]),
            ok(1))
        self.assertEqual(
            'somexmlid',
            self.xid(self.browse()[0]))

    def test_update_with_id(self):
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            36,
            self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
        self.assertEqual(
            self.import_(['.id', 'value'], [[str(id), '42']]),
            ok(1))
        self.assertEqual(
            [42], # updated value to imported
            values(self.read()))

    def test_update_with_xid(self):
        # Importing the same external id twice updates the same record.
        self.import_(['id', 'value'], [['somexmlid', '36']])
        self.assertEqual([36], values(self.read()))
        self.import_(['id', 'value'], [['somexmlid', '1234567']])
        self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
    """Import conversion rules for boolean columns."""
    model_name = 'export.boolean'

    def test_empty(self):
        """No data rows import zero records."""
        self.assertEqual(self.import_(['value'], []), ok(0))

    def test_exported(self):
        """Round-trip of the exporter's own 'False'/'True' labels."""
        self.assertEqual(
            self.import_(['value'], [['False'], ['True']]),
            ok(2))
        self.assertEqual([False, True], values(self.read()))

    def test_falses(self):
        """Only a small fixed set of tokens is recognised as False."""
        falsy_rows = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u'']]
        self.assertEqual(self.import_(['value'], falsy_rows), ok(5))
        self.assertEqual([False] * 5, values(self.read()))

    def test_trues(self):
        """Any token outside the falsy set imports as True.

        Problem: OpenOffice (and probably excel) output localized booleans,
        so values such as 'VRAI' must be accepted as truthy.
        """
        truthy_rows = [
            ['off'],
            ['None'],
            ['nil'],
            ['()'],
            ['f'],
            ['#f'],
            ['VRAI'],
            [u'OFF'],
        ]
        self.assertEqual(self.import_(['value'], truthy_rows), ok(8))
        self.assertEqual([True] * 8, values(self.read()))
class test_integer_field(ImporterCase):
    """Import conversion rules for integer columns."""
    model_name = 'export.integer'

    def test_none(self):
        self.assertEqual(self.import_(['value'], []), ok(0))

    def test_empty(self):
        """An empty cell reads back as False (OpenERP's SQL NULL stand-in)."""
        self.assertEqual(self.import_(['value'], [['']]), ok(1))
        self.assertEqual([False], values(self.read()))

    def test_zero(self):
        """Both '0' and '-0' read back as False."""
        self.assertEqual(self.import_(['value'], [['0']]), ok(1))
        self.assertEqual(self.import_(['value'], [['-0']]), ok(1))
        self.assertEqual([False, False], values(self.read()))

    def test_positives(self):
        rows = [['1'], ['42'], [str(2**31 - 1)], ['12345678']]
        self.assertEqual(self.import_(['value'], rows), ok(4))
        self.assertEqual(
            [1, 42, 2**31 - 1, 12345678],
            values(self.read()))

    def test_negatives(self):
        rows = [
            ['-1'],
            ['-42'],
            [str(-(2**31 - 1))],
            [str(-(2**31))],
            ['-12345678'],
        ]
        self.assertEqual(self.import_(['value'], rows), ok(5))
        self.assertEqual(
            [-1, -42, -(2**31 - 1), -(2**31), -12345678],
            values(self.read()))

    @mute_logger('openerp.sql_db')
    def test_out_of_range(self):
        """Values outside the 32-bit range are rejected by the database."""
        self.assertEqual(
            self.import_(['value'], [[str(2**31)]]),
            error(1, "integer out of range\n"))
        # auto-rollbacks if error is in process_lines, but not during
        # ir.model.data write (can differentiate because the former ends
        # error lines with "!"), so roll back before the next import
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value'], [[str(-2**32)]]),
            error(1, "integer out of range\n"))

    def test_nonsense(self):
        self.assertEqual(
            self.import_(['value'], [['zorglub']]),
            error(1, u"'zorglub' does not seem to be an integer for field 'unknown'"))
class test_float_field(ImporterCase):
    """Import conversion rules for float columns."""
    model_name = 'export.float'

    def test_none(self):
        self.assertEqual(self.import_(['value'], []), ok(0))

    def test_empty(self):
        """An empty cell reads back as False (SQL NULL stand-in)."""
        self.assertEqual(self.import_(['value'], [['']]), ok(1))
        self.assertEqual([False], values(self.read()))

    def test_zero(self):
        """'0' and '-0' both read back as False."""
        self.assertEqual(self.import_(['value'], [['0']]), ok(1))
        self.assertEqual(self.import_(['value'], [['-0']]), ok(1))
        self.assertEqual([False, False], values(self.read()))

    def test_positives(self):
        rows = [
            ['1'],
            ['42'],
            [str(2**31 - 1)],
            ['12345678'],
            [str(2**33)],
            ['0.000001'],
        ]
        self.assertEqual(self.import_(['value'], rows), ok(6))
        self.assertEqual(
            [1, 42, 2**31 - 1, 12345678, 2.0**33, .000001],
            values(self.read()))

    def test_negatives(self):
        rows = [
            ['-1'],
            ['-42'],
            [str(-2**31 + 1)],
            [str(-2**31)],
            ['-12345678'],
            [str(-2**33)],
            ['-0.000001'],
        ]
        self.assertEqual(self.import_(['value'], rows), ok(7))
        self.assertEqual(
            [-1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001],
            values(self.read()))

    def test_nonsense(self):
        self.assertEqual(
            self.import_(['value'], [['foobar']]),
            error(1, u"'foobar' does not seem to be a number for field 'unknown'"))
class test_string_field(ImporterCase):
    """Import into a size-bounded char column: long values are truncated."""
    model_name = 'export.string.bounded'

    def test_empty(self):
        self.assertEqual(self.import_(['value'], [['']]), ok(1))
        self.assertEqual([False], values(self.read()))

    def test_imported(self):
        rows = [
            [u'foobar'],
            [u'foobarbaz'],
            [u'Með suð í eyrum við spilum endalaust'],
            [u"People 'get' types. They use them all the time. Telling "
             u"someone he can't pound a nail with a banana doesn't much "
             u"surprise him."],
        ]
        self.assertEqual(self.import_(['value'], rows), ok(4))
        # anything longer than the field's size is silently truncated
        self.assertEqual([
            u"foobar",
            u"foobarbaz",
            u"Með suð í eyrum ",
            u"People 'get' typ",
        ], values(self.read()))
class test_unbound_string_field(ImporterCase):
    """Import into an unbounded char column: values are kept whole."""
    model_name = 'export.string'

    def test_imported(self):
        rows = [
            [u'í dag viðrar vel til loftárása'],
            # ackbar.jpg
            [u"If they ask you about fun, you tell them – fun is a filthy"
             u" parasite"],
        ]
        self.assertEqual(self.import_(['value'], rows), ok(2))
        self.assertEqual([
            u"í dag viðrar vel til loftárása",
            u"If they ask you about fun, you tell them – fun is a filthy parasite",
        ], values(self.read()))
class test_text(ImporterCase):
    """Import into a text column."""
    model_name = 'export.text'

    def test_empty(self):
        self.assertEqual(self.import_(['value'], [['']]), ok(1))
        self.assertEqual([False], values(self.read()))

    def test_imported(self):
        # long multi-paragraph unicode content must survive unchanged
        s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
             u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
             u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
             u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
             u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
             u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
        self.assertEqual(self.import_(['value'], [[s]]), ok(1))
        self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
    """Import of static selection fields, including translated labels."""
    model_name = 'export.selection'
    # (source label, french label) pairs installed by test_imported_translated
    translations_fr = [
        ("Qux", "toto"),
        ("Bar", "titi"),
        ("Foo", "tete"),
    ]

    def test_imported(self):
        """Selection cells can hold either the label or the raw value."""
        self.assertEqual(
            self.import_(['value'], [
                ['Qux'],
                ['Bar'],
                ['Foo'],
                ['2'],
            ]),
            ok(4))
        self.assertEqual([3, 2, 1, 2], values(self.read()))

    def test_imported_translated(self):
        """With a lang in context, translated labels are matched."""
        # install a french locale the translations can attach to
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        self.assertEqual(
            self.import_(['value'], [
                ['toto'],
                ['tete'],
                ['titi'],
            ], context={'lang': 'fr_FR'}),
            ok(3))
        self.assertEqual([3, 1, 2], values(self.read()))
        # untranslated source labels still work when a lang is set
        self.assertEqual(
            self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'}),
            ok(1))

    def test_invalid(self):
        """Labels/values outside the selection are rejected row by row."""
        self.assertEqual(
            self.import_(['value'], [['Baz']]),
            error(1, u"Value 'Baz' not found in selection field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value'], [[42]]),
            error(1, u"Value '42' not found in selection field 'unknown'"))
class test_selection_function(ImporterCase):
    """Import into a selection field whose options come from a function."""
    model_name = 'export.selection.function'
    # (source label, french label) pairs installed by test_translated
    translations_fr = [
        ("Corge", "toto"),
        ("Grault", "titi"),
        ("Wheee", "tete"),
        ("Moog", "tutu"),
    ]

    def test_imported(self):
        """ import uses fields_get, so translates import label (may or may not
        be good news) *and* serializes the selection function to reverse it:
        import does not actually know that the selection field uses a function
        """
        # NOTE: conflict between a value and a label => ?
        self.assertEqual(
            self.import_(['value'], [
                ['3'],
                ["Grault"],
            ]),
            ok(2))
        self.assertEqual(
            [3, 1],
            values(self.read()))

    def test_translated(self):
        """ Expects output of selection function returns translated labels
        """
        # install a french locale the translations can attach to
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        # translated labels resolve through the serialized function
        self.assertEqual(
            self.import_(['value'], [
                ['toto'],
                ['tete'],
            ], context={'lang': 'fr_FR'}),
            ok(2))
        # source labels keep working when a lang is set
        self.assertEqual(
            self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'}),
            ok(1))
class test_m2o(ImporterCase):
    """Import behaviour of many2one fields: by name, external id or db id."""
    model_name = 'export.many2one'

    def test_by_name(self):
        """A plain m2o column is resolved through the target's display name."""
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # get its name
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID, [integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID, [integer_id2]))[integer_id2]

        self.assertEqual(
            self.import_(['value'], [
                # import by name_get
                [name1],
                [name1],
                [name2],
            ]),
            ok(3))
        # correct ids assigned to corresponding records
        self.assertEqual([
            (integer_id1, name1),
            (integer_id1, name1),
            (integer_id2, name2),],
            values(self.read()))

    def test_by_xid(self):
        """A ``field/id`` column is resolved through ir.model.data."""
        ExportInteger = self.registry('export.integer')
        integer_id = ExportInteger.create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        xid = self.xid(ExportInteger.browse(
            self.cr, openerp.SUPERUSER_ID, [integer_id])[0])

        self.assertEqual(
            self.import_(['value/id'], [[xid]]),
            ok(1))
        b = self.browse()
        self.assertEqual(42, b[0].value.value)

    def test_by_id(self):
        """A ``field/.id`` column is resolved as a raw database id."""
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        self.assertEqual(
            self.import_(['value/.id'], [[integer_id]]),
            ok(1))
        b = self.browse()
        self.assertEqual(42, b[0].value.value)

    def test_by_names(self):
        """When several records share a name, the first match is linked."""
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID, [integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID, [integer_id2]))[integer_id2]
        # names should be the same
        self.assertEqual(name1, name2)

        self.assertEqual(
            self.import_(['value'], [[name2]]),
            ok(1))
        self.assertEqual([
            (integer_id1, name1)
        ], values(self.read()))

    def test_fail_by_implicit_id(self):
        """ Can't implicitly import records by id
        """
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            self.import_(['value'], [
                # import by id, without specifying it
                [integer_id1],
                [integer_id2],
                [integer_id1],
            ]),
            error(1, u"No matching record found for name '%s' in field 'unknown'" % integer_id1))

    def test_sub_field(self):
        """ Does not implicitly create the record, does not warn that you can't
        import m2o subfields (at all)...
        """
        self.assertEqual(
            self.import_(['value/value'], [['42']]),
            error(1, u"Can not create Many-To-One records indirectly, import the field separately"))

    def test_fail_noids(self):
        """Unresolvable name, xid or db id each produce a specific error."""
        self.assertEqual(
            self.import_(['value'], [['nameisnoexist:3']]),
            error(1, u"No matching record found for name 'nameisnoexist:3' in field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value/id'], [['noxidhere']]),
            error(1, u"No matching record found for external id 'noxidhere' in field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value/.id'], [[66]]),
            error(1, u"No matching record found for database id '66' in field 'unknown'"))
class test_m2m(ImporterCase):
    """Import behaviour of many2many fields."""
    model_name = 'export.many2many'

    # apparently, one and only thing which works is a
    # csv_internal_sep-separated list of ids, xids, or names (depending if
    # m2m/.id, m2m/id or m2m[/anythingelse]

    def test_ids(self):
        """``field/.id`` cells hold comma-separated database ids."""
        id1 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        id5 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})

        self.assertEqual(
            self.import_(['value/.id'], [
                ['%d,%d' % (id1, id2)],
                ['%d,%d,%d' % (id1, id3, id4)],
                ['%d,%d,%d' % (id1, id2, id3)],
                ['%d' % id5]
            ]),
            ok(4))
        ids = lambda records: [record.id for record in records]

        b = self.browse()
        self.assertEqual(ids(b[0].value), [id1, id2])
        self.assertEqual(values(b[0].value), [3, 44])

        self.assertEqual(ids(b[2].value), [id1, id2, id3])
        self.assertEqual(values(b[2].value), [3, 44, 84])

    def test_noids(self):
        """An unknown database id fails the whole row."""
        self.assertEqual(
            self.import_(['value/.id'], [['42']]),
            error(1, u"No matching record found for database id '42' in field 'unknown'"))

    def test_xids(self):
        """``field/id`` cells hold comma-separated external ids."""
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])

        self.assertEqual(
            self.import_(['value/id'], [
                ['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
                ['%s' % self.xid(records[3])],
                ['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
            ]),
            ok(3))

        b = self.browse()
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(values(b[2].value), [44, 84])

    def test_noxids(self):
        """An unknown external id fails the whole row."""
        self.assertEqual(
            self.import_(['value/id'], [['noxidforthat']]),
            error(1, u"No matching record found for external id 'noxidforthat' in field 'unknown'"))

    def test_names(self):
        """Plain m2m cells hold comma-separated display names."""
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])

        name = lambda record: record.name_get()[0][1]

        self.assertEqual(
            self.import_(['value'], [
                ['%s,%s' % (name(records[1]), name(records[2]))],
                ['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
                ['%s,%s' % (name(records[0]), name(records[3]))],
            ]),
            ok(3))

        b = self.browse()
        self.assertEqual(values(b[1].value), [3, 44, 84])
        self.assertEqual(values(b[2].value), [3, 9])

    def test_nonames(self):
        """An unknown display name fails the whole row."""
        self.assertEqual(
            self.import_(['value'], [['wherethem2mhavenonames']]),
            error(1, u"No matching record found for name 'wherethem2mhavenonames' in field 'unknown'"))

    def test_import_to_existing(self):
        """Importing the same xid twice replaces the m2m links wholesale."""
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})

        xid = 'myxid'
        self.assertEqual(
            self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]]),
            ok(1))
        self.assertEqual(
            self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]]),
            ok(1))

        b = self.browse()
        self.assertEqual(len(b), 1)
        # TODO: replacement of existing m2m values is correct?
        self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
    """Import behaviour of one2many fields."""
    model_name = 'export.one2many'

    def test_name_get(self):
        """o2m cells can not be resolved by display name."""
        s = u'Java is a DSL for taking large XML files and converting them to' \
            u' stack traces'
        self.assertEqual(
            self.import_(
                ['const', 'value'],
                [['5', s]]),
            error(1, u"No matching record found for name '%s' in field 'unknown'" % s))

    def test_single(self):
        """One parent row with one child sub-row."""
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63']
            ]),
            ok(1))

        (b,) = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.value), [63])

    def test_multicore(self):
        """Two parent rows, each with its own child."""
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63'],
                ['6', '64'],
            ]),
            ok(2))

        b1, b2 = self.browse()
        self.assertEqual(b1.const, 5)
        self.assertEqual(values(b1.value), [63])
        self.assertEqual(b2.const, 6)
        self.assertEqual(values(b2.value), [64])

    def test_multisub(self):
        """Rows with an empty parent column attach to the previous parent."""
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63'],
                ['', '64'],
                ['', '65'],
                ['', '66'],
            ]),
            ok(4))

        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])

    def test_multi_subfields(self):
        """Several sub-fields of the same o2m can be imported together."""
        self.assertEqual(
            self.import_(['value/str', 'const', 'value/value'], [
                ['this', '5', '63'],
                ['is', '', '64'],
                ['the', '', '65'],
                ['rhythm', '', '66'],
            ]),
            ok(4))

        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
        self.assertEqual(
            values(b.value, 'str'),
            'this is the rhythm'.split())

    def test_link_inline(self):
        """A comma-separated id list in ``value/.id`` blows up: the importer
        feeds the whole cell to int().
        """
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })

        try:
            self.import_(['const', 'value/.id'], [
                ['42', '%d,%d' % (id1, id2)]
            ])
        # Python-3-compatible syntax ("except X, e" is Python-2-only)
        except ValueError as e:
            # should be Exception(Database ID doesn't exist: export.one2many.child : $id1,$id2)
            self.assertIs(type(e), ValueError)
            self.assertEqual(
                e.args[0],
                "invalid literal for int() with base 10: '%d,%d'" % (id1, id2))
        else:
            # previously the test silently passed when nothing was raised
            self.fail("expected the import to raise ValueError")

    def test_link(self):
        """Existing children can be linked one per row through ``value/.id``."""
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })

        self.assertEqual(
            self.import_(['const', 'value/.id'], [
                ['42', str(id1)],
                ['', str(id2)],
            ]),
            ok(2))

        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])

    def test_link_2(self):
        """Linking by db id can be combined with updating a sub-field."""
        O2M_c = self.registry('export.one2many.child')
        id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })

        self.assertEqual(
            self.import_(['const', 'value/.id', 'value/value'], [
                ['42', str(id1), '1'],
                ['', str(id2), '2'],
            ]),
            ok(2))

        [b] = self.browse()
        self.assertEqual(b.const, 42)
        self.assertEqual(values(b.value), [1, 2])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
    """Import rows feeding two one2many fields of the same parent record."""
    model_name = 'export.one2many.multiple'

    def test_multi_mixed(self):
        """Child rows for both o2m fields may be interleaved freely."""
        rows = [
            ['5', '11', '21'],
            ['', '12', '22'],
            ['', '13', '23'],
            ['', '14', ''],
        ]
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], rows),
            ok(4))
        [parent] = self.browse()
        self.assertEqual(values(parent.child1), [11, 12, 13, 14])
        self.assertEqual(values(parent.child2), [21, 22, 23])

    def test_multi(self):
        """Child rows may also come in per-field runs."""
        rows = [
            ['5', '11', '21'],
            ['', '12', ''],
            ['', '13', ''],
            ['', '14', ''],
            ['', '', '22'],
            ['', '', '23'],
        ]
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], rows),
            ok(6))
        [parent] = self.browse()
        self.assertEqual(values(parent.child1), [11, 12, 13, 14])
        self.assertEqual(values(parent.child2), [21, 22, 23])

    def test_multi_fullsplit(self):
        """Fully disjoint runs for each child field still attach correctly."""
        rows = [
            ['5', '11', ''],
            ['', '12', ''],
            ['', '13', ''],
            ['', '14', ''],
            ['', '', '21'],
            ['', '', '22'],
            ['', '', '23'],
        ]
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], rows),
            ok(7))
        [parent] = self.browse()
        self.assertEqual(parent.const, 5)
        self.assertEqual(values(parent.child1), [11, 12, 13, 14])
        self.assertEqual(values(parent.child2), [21, 22, 23])
# function, related, reference: written to db as-is...
# => function uses @type for value coercion/conversion
| agpl-3.0 |
johnteslade/azulejo | azulejo/azulejo_controller.py | 1 | 1499 | import logging
from .arrange_maximize import ArrangeMaximize
from .arrange_move_monitor import ArrangeMoveMonitor
from .arrange_move_window import ArrangeMoveWindow
from .arrange_multiple_windows import ArrangeMultipleWindows
from .arrange_rotate import ArrangeRotate
from .arrange_single_window import ArrangeSingleWindow
class AzulejoController(object):
    """Dispatch an action name to the arranger object implementing it."""

    def __init__(self, screen_obj_in):
        """Initialiser.

        Uses ``screen_obj_in`` when given; otherwise builds a default
        AzulejoScreen (imported lazily, only when actually needed).
        """
        if not screen_obj_in:
            from .azulejo_screen import AzulejoScreen
            screen_obj_in = AzulejoScreen()
        self._screen = screen_obj_in

        # One long-lived handler instance per supported action name.
        screen = self._screen
        self._action_classes = {
            'maximize': ArrangeMaximize(screen),
            'move_monitor': ArrangeMoveMonitor(screen),
            'move_single_window': ArrangeMoveWindow(screen),
            'resize_single_window': ArrangeSingleWindow(screen),
            'resize_windows': ArrangeMultipleWindows(screen),
            'rotate_windows': ArrangeRotate(screen),
        }

    def do_action(self, function, params):
        """Run the handler registered for ``function`` with ``params``."""
        # Force screen update so the handler sees the current window layout.
        self._screen.update()

        logging.debug("--- Starting action {}".format(function))
        handler = self._action_classes[function]
        handler.do(params)
        logging.debug("--- Action done {}".format(function))
| mit |
CHBMB/LazyLibrarian | lazylibrarian/unittests/test_versioncheck.py | 1 | 5093 | import os
import unittest
import lazylibrarian
from lazylibrarian import versioncheck, version
#
# These tests MUST be executed in the root PROG_DIR directory. Otherwise
# test for GIT install doesn't work correctly
# execution is via python -m unittest discover -s lazylibrarian/unittests/ -v
class VersionCheckTest(unittest.TestCase):
    """Unit tests for lazylibrarian.versioncheck install-type and version
    detection. Uses the non-deprecated assertEqual (assertEquals is a
    deprecated alias removed in recent Python versions).
    """

    def setUp(self):
        pass

    def tearDown(self):
        # Reset the module-level state the tests mutate.
        lazylibrarian.INSTALL_TYPE = ''
        lazylibrarian.PROG_DIR = ''
        version.LAZYLIBRARIAN_VERSION = ''

    # Check setVersion function
    def test_getInstallTypeGIT(self):
        """Running from inside a git checkout is detected as a git install."""
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        versioncheck.getInstallType()
        self.assertEqual("git", lazylibrarian.INSTALL_TYPE)

    def test_getInstallTypeWIN(self):
        """A win32build version string is detected as a windows install."""
        version.LAZYLIBRARIAN_VERSION = 'win32build'
        versioncheck.getInstallType()
        self.assertEqual("win", lazylibrarian.INSTALL_TYPE)
        self.assertEqual("Windows", lazylibrarian.CURRENT_BRANCH)

    def test_getInstallTypeSource(self):
        """A non-git, non-windows location falls back to a source install."""
        lazylibrarian.PROG_DIR = '/tmp'
        versioncheck.getInstallType()
        self.assertEqual("source", lazylibrarian.INSTALL_TYPE)
        self.assertEqual("master", lazylibrarian.CURRENT_BRANCH)

    # getCurrentBranch returns branch name of current install.
    # for most it should be master.
    def test_getCurrentBranch(self):
        lazylibrarian.INSTALL_TYPE = 'git'
        # doesn't work as tests are in unittest directory. so it doesn't pick up the root install dir
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        result = versioncheck.getCurrentGitBranch()
        self.assertEqual("master", result)

    # IF you have a windows or non git based install this should return "NON GIT INSTALL"
    def test_getCurrentBranchForNonGITInstalls(self):
        lazylibrarian.INSTALL_TYPE = 'win'
        result = versioncheck.getCurrentGitBranch()
        self.assertEqual("NON GIT INSTALL", result)

    #
    # getVersion Unit test - 4 options
    # No install type set
    # Install type - win, git, source
    # check responses back from each setting is correct.
    def test_getCurrentVersion(self):
        result = versioncheck.getCurrentVersion()
        self.assertEqual("Install type not set", result)

    # can never pass as the version should always check next version
    def test_getCurrentVersion_ForGIT(self):
        lazylibrarian.INSTALL_TYPE = 'git'
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        result = versioncheck.getCurrentVersion()
        self.assertEqual("ac3be411f792c62895ad16bc120d92eaf44345c2", result)

    def test_getCurrentVersion_ForWindows(self):
        # Over write the version file value
        lazylibrarian.INSTALL_TYPE = 'win'
        result = versioncheck.getCurrentVersion()
        self.assertEqual("Windows Install", result)

    def test_getCurrentVersion_ForSource(self):
        # Over write the version file value
        lazylibrarian.INSTALL_TYPE = 'source'
        result = versioncheck.getCurrentVersion()
        self.assertEqual("test-version-file", result)

    # simple git test, just check a version is returned but not care about the version number
    def test_runGit(self):
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        output, err = versioncheck.runGit('--version')
        self.assertTrue(output.strip().startswith('git version'))

    def test_getLatestVersion_GIT(self):
        lazylibrarian.INSTALL_TYPE = 'git'
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        result = versioncheck.getLatestVersion()
        self.assertEqual(lazylibrarian.LATEST_VERSION, result)

    def test_getLatestVersion_SOURCE(self):
        lazylibrarian.INSTALL_TYPE = 'source'
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        result = versioncheck.getLatestVersion()
        self.assertEqual(lazylibrarian.LATEST_VERSION, result)

    def test_getLatestVersion_WIN(self):
        lazylibrarian.INSTALL_TYPE = 'win'
        result = versioncheck.getLatestVersion()
        self.assertEqual("WIN INSTALL", result)

    def test_getLatestVersion(self):
        result = versioncheck.getLatestVersion()
        self.assertEqual("UNKNOWN INSTALL", result)

    def test_getLatestVersionaFromGit_TypeWin(self):
        lazylibrarian.INSTALL_TYPE = 'win'
        result = versioncheck.getLatestVersionaFromGit()
        self.assertEqual("WINDOWS INSTALL", result)

    def test_getLatestVersionaFromGit_TypeGit(self):
        lazylibrarian.INSTALL_TYPE = 'git'
        lazylibrarian.PROG_DIR = os.path.dirname(os.path.abspath(__file__))
        result = versioncheck.getLatestVersionaFromGit()
        # self.assertEqual('ac3be411f792c62895ad16bc120d92eaf44345c2', result)
        pass

    def test_updateVersionFile(self):
        pass

    # tests todo
    # OS Install
    # auto update on/off
unittest.main()
| gpl-3.0 |
stianvi/ansible-modules-core | packaging/language/easy_install.py | 198 | 6956 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import tempfile
import os.path
DOCUMENTATION = '''
---
module: easy_install
short_description: Installs Python libraries
description:
- Installs Python libraries, optionally in a I(virtualenv)
version_added: "0.7"
options:
name:
description:
- A Python library name
required: true
default: null
aliases: []
virtualenv:
description:
- an optional I(virtualenv) directory path to install into. If the
I(virtualenv) does not exist, it is created automatically
required: false
default: null
virtualenv_site_packages:
version_added: "1.1"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command to create the virtual environment with. For example
C(pyvenv), C(virtualenv), C(virtualenv2).
required: false
default: virtualenv
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run easy_install for a specific version of Python installed in the
system. For example C(easy_install-3.3), if there are both Python 2.7
and 3.3 installations in the system and you want to run easy_install
for the Python 3.3 installation.
version_added: "1.3"
required: false
default: null
state:
version_added: "2.0"
description:
- The desired state of the library. C(latest) ensures that the latest version is installed.
required: false
choices: [present, latest]
default: present
notes:
- Please note that the M(easy_install) module can only install Python
libraries. Thus this module is not able to remove libraries. It is
generally recommended to use the M(pip) module which you can first install
using M(easy_install).
- Also note that I(virtualenv) must be installed on the remote host if the
C(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Examples from Ansible Playbooks
- easy_install: name=pip state=latest
# Install Bottle into the specified virtualenv.
- easy_install: name=bottle virtualenv=/webapps/myapp/venv
'''
def _is_package_installed(module, name, easy_install, executable_arguments):
executable_arguments = executable_arguments + ['--dry-run']
cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
rc, status_stdout, status_stderr = module.run_command(cmd)
return not ('Reading' in status_stdout or 'Downloading' in status_stdout)
def _get_easy_install(module, env=None, executable=None):
candidate_easy_inst_basenames = ['easy_install']
easy_install = None
if executable is not None:
if os.path.isabs(executable):
easy_install = executable
else:
candidate_easy_inst_basenames.insert(0, executable)
if easy_install is None:
if env is None:
opt_dirs = []
else:
# Try easy_install with the virtualenv directory first.
opt_dirs = ['%s/bin' % env]
for basename in candidate_easy_inst_basenames:
easy_install = module.get_bin_path(basename, False, opt_dirs)
if easy_install is not None:
break
# easy_install should have been found by now. The final call to
# get_bin_path will trigger fail_json.
if easy_install is None:
basename = candidate_easy_inst_basenames[0]
easy_install = module.get_bin_path(basename, True, opt_dirs)
return easy_install
def main():
    """Module entry point: make sure the requested library is installed via
    easy_install, optionally inside a (possibly freshly created) virtualenv.

    Exits through module.exit_json / module.fail_json in every path.
    """
    arg_spec = dict(
        name=dict(required=True),
        state=dict(required=False,
                   default='present',
                   choices=['present','latest'],
                   type='str'),
        virtualenv=dict(default=None, required=False),
        virtualenv_site_packages=dict(default='no', type='bool'),
        virtualenv_command=dict(default='virtualenv', required=False),
        executable=dict(default='easy_install', required=False),
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    env = module.params['virtualenv']
    executable = module.params['executable']
    site_packages = module.params['virtualenv_site_packages']
    virtualenv_command = module.params['virtualenv_command']
    executable_arguments = []
    # state=latest maps onto easy_install's --upgrade flag
    if module.params['state'] == 'latest':
        executable_arguments.append('--upgrade')

    rc = 0
    err = ''
    out = ''

    if env:
        virtualenv = module.get_bin_path(virtualenv_command, True)

        # a missing bin/activate means the virtualenv must be created first
        if not os.path.exists(os.path.join(env, 'bin', 'activate')):
            if module.check_mode:
                module.exit_json(changed=True)
            command = '%s %s' % (virtualenv, env)
            if site_packages:
                command += ' --system-site-packages'
            cwd = tempfile.gettempdir()
            rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)

            rc += rc_venv
            out += out_venv
            err += err_venv

    easy_install = _get_easy_install(module, env, executable)

    cmd = None
    changed = False
    installed = _is_package_installed(module, name, easy_install, executable_arguments)

    # only shell out to easy_install when the package is actually missing
    if not installed:
        if module.check_mode:
            module.exit_json(changed=True)
        cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
        rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd)

        rc += rc_easy_inst
        out += out_easy_inst
        err += err_easy_inst

        changed = True

    if rc != 0:
        module.fail_json(msg=err, cmd=cmd)

    module.exit_json(changed=changed, binary=easy_install,
                     name=name, virtualenv=env)
# import module snippets
# (Ansible historically appends its module_utils boilerplate at the bottom
# of module files; this wildcard import must run before main() so that
# AnsibleModule is in scope.)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
GbalsaC/bitnamiP | common/lib/capa/capa/tests/test_targeted_feedback.py | 196 | 25678 | """
Tests the logic of the "targeted-feedback" attribute for MultipleChoice questions,
i.e. those with the <multiplechoiceresponse> element
"""
import unittest
import textwrap
from . import test_capa_system, new_loncapa_problem, load_fixture
class CapaTargetedFeedbackTest(unittest.TestCase):
    '''
    Tests rendering of <multiplechoiceresponse> problems that use the
    "targeted-feedback" attribute together with <targetedfeedback>,
    <solution> and <solutionset> elements.
    '''
    def setUp(self):
        super(CapaTargetedFeedbackTest, self).setUp()
        # Shared capa test system used by new_loncapa_problem() below.
        self.system = test_capa_system()
    def test_no_targeted_feedback(self):
        """Without the targeted-feedback attribute on <multiplechoiceresponse>,
        choices render normally and the explanation ids are not consumed, so
        they remain visible somewhere in the rendered output."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse>
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback2">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 2nd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solution explanation-id="feedbackC">
                <div class="detailed-solution">
                    <p>Explanation</p>
                    <p>This is the solution explanation</p>
                    <p>Not much to explain here, sorry!</p>
                </div>
            </solution>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str)
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        # All four choices appear in order; ids are untouched by rendering.
        self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
        self.assertRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
def test_targeted_feedback_not_finished(self):
problem = new_loncapa_problem(load_fixture('targeted_feedback.xml'))
the_html = problem.get_html()
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
self.assertEquals(the_html, problem.get_html(), "Should be able to call get_html() twice")
def test_targeted_feedback_student_answer1(self):
problem = new_loncapa_problem(load_fixture('targeted_feedback.xml'))
problem.done = True
problem.student_answers = {'1_2_1': 'choice_3'}
the_html = problem.get_html()
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback3\">.*3rd WRONG solution")
self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedbackC")
# Check that calling it multiple times yields the same thing
the_html2 = problem.get_html()
self.assertEquals(the_html, the_html2)
def test_targeted_feedback_student_answer2(self):
problem = new_loncapa_problem(load_fixture('targeted_feedback.xml'))
problem.done = True
problem.student_answers = {'1_2_1': 'choice_0'}
the_html = problem.get_html()
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC")
    def test_targeted_feedback_id_typos(self):
        """Cases where the explanation-id's don't match anything: rendering
        must degrade gracefully to an empty <targetedfeedbackset>."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="">
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1TYPO">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackCTYPO">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback2">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 2nd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solution explanation-id="feedbackC">
                <div class="detailed-solution">
                    <p>Explanation</p>
                    <p>This is the solution explanation</p>
                    <p>Not much to explain here, sorry!</p>
                </div>
            </solution>
            </problem>
        """)
        # explanation-id does not match anything: fall back to empty targetedfeedbackset
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_0'}
        the_html = problem.get_html()
        self.assertRegexpMatches(the_html, r"<targetedfeedbackset>\s*</targetedfeedbackset>")
        # New problem with same XML -- try the correct choice.
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_2'}  # correct
        the_html = problem.get_html()
        self.assertRegexpMatches(the_html, r"<targetedfeedbackset>\s*</targetedfeedbackset>")
    def test_targeted_feedback_no_solution_element(self):
        """A problem with no <solution> element must still render targeted
        feedback for the chosen (correct) answer without error."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="">
                <choicegroup type="MultipleChoice">
                    <choice correct="false">wrong-1</choice>
                    <choice correct="false">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            </problem>
        """)
        # Solution element not found
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_2'}
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        # </div> right after </targetedfeedbackset>
        self.assertRegexpMatches(
            without_new_lines,
            r"<div>.*<targetedfeedbackset>.*</targetedfeedbackset>\s*</div>"
        )
    def test_targeted_feedback_show_solution_explanation(self):
        """With targeted-feedback="alwaysShowCorrectChoiceExplanation", a wrong
        answer shows both its own feedback and the correct-choice explanation,
        and the generic solution placeholder is suppressed."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback2">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 2nd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solution explanation-id="feedbackC">
                <div class="detailed-solution">
                    <p>Explanation</p>
                    <p>This is the solution explanation</p>
                    <p>Not much to explain here, sorry!</p>
                </div>
            </solution>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_0'}
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
        self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
        # Check that calling it multiple times yields the same thing
        the_html2 = problem.get_html()
        self.assertEquals(the_html, the_html2)
    def test_targeted_feedback_no_show_solution_explanation(self):
        """With a bare targeted-feedback attribute (no alwaysShow...), a wrong
        answer shows only its own feedback; the correct-choice explanation is
        withheld and the generic solution placeholder remains."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="">
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback2">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 2nd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solution explanation-id="feedbackC">
                <div class="detailed-solution">
                    <p>Explanation</p>
                    <p>This is the solution explanation</p>
                    <p>Not much to explain here, sorry!</p>
                </div>
            </solution>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_0'}
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
        self.assertNotRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
        self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC")
    def test_targeted_feedback_with_solutionset_explanation(self):
        """With multiple correct choices and a <solutionset>, the explanation
        shown is the one whose explanation-id has a matching <solution>."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                    <choice correct="true" explanation-id="feedbackC2">correct-2</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback2">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 2nd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC2">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on the other solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solutionset>
                <solution explanation-id="feedbackC2">
                    <div class="detailed-solution">
                        <p>Explanation</p>
                        <p>This is the other solution explanation</p>
                        <p>Not much to explain here, sorry!</p>
                    </div>
                </solution>
            </solutionset>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_0'}
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC2\".*other solution explanation")
        self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
    def test_targeted_feedback_no_feedback_for_selected_choice1(self):
        """Selected choice (wrong-2) has no <targetedfeedback>, but the
        alwaysShowCorrectChoiceExplanation mode still shows the correct
        choice's solution explanation."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solutionset>
                <solution explanation-id="feedbackC">
                    <div class="detailed-solution">
                        <p>Explanation</p>
                        <p>This is the solution explanation</p>
                        <p>Not much to explain here, sorry!</p>
                    </div>
                </solution>
            </solutionset>
            </problem>
        """)
        # The student choses one with no feedback, but alwaysShowCorrectChoiceExplanation
        # is in force, so we should see the correct solution feedback.
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_1'}
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
        self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3")
    def test_targeted_feedback_no_feedback_for_selected_choice2(self):
        """Selected choice (wrong-2) has no <targetedfeedback> and the
        alwaysShow mode is off: no feedback at all, only the generic
        solution placeholder."""
        xml_str = textwrap.dedent("""
            <problem>
            <p>What is the correct answer?</p>
            <multiplechoiceresponse targeted-feedback="">
                <choicegroup type="MultipleChoice">
                    <choice correct="false" explanation-id="feedback1">wrong-1</choice>
                    <choice correct="false" explanation-id="feedback2">wrong-2</choice>
                    <choice correct="true" explanation-id="feedbackC">correct-1</choice>
                    <choice correct="false" explanation-id="feedback3">wrong-3</choice>
                </choicegroup>
            </multiplechoiceresponse>
            <targetedfeedbackset>
                <targetedfeedback explanation-id="feedback1">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 1st WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedback3">
                    <div class="detailed-targeted-feedback">
                        <p>Targeted Feedback</p>
                        <p>This is the 3rd WRONG solution</p>
                    </div>
                </targetedfeedback>
                <targetedfeedback explanation-id="feedbackC">
                    <div class="detailed-targeted-feedback-correct">
                        <p>Targeted Feedback</p>
                        <p>Feedback on your correct solution...</p>
                    </div>
                </targetedfeedback>
            </targetedfeedbackset>
            <solutionset>
                <solution explanation-id="feedbackC">
                    <div class="detailed-solution">
                        <p>Explanation</p>
                        <p>This is the solution explanation</p>
                        <p>Not much to explain here, sorry!</p>
                    </div>
                </solution>
            </solutionset>
            </problem>
        """)
        # The student chooses one with no feedback set, so we check that there's no feedback.
        problem = new_loncapa_problem(xml_str)
        problem.done = True
        problem.student_answers = {'1_2_1': 'choice_1'}
        the_html = problem.get_html()
        without_new_lines = the_html.replace("\n", "")
        self.assertNotRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
        self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3|feedbackC")
def test_targeted_feedback_multiple_not_answered(self):
# Not answered -> empty targeted feedback
problem = new_loncapa_problem(load_fixture('targeted_feedback_multiple.xml'))
the_html = problem.get_html()
without_new_lines = the_html.replace("\n", "")
# Q1 and Q2 have no feedback
self.assertRegexpMatches(
without_new_lines,
r'<targetedfeedbackset.*?>\s*</targetedfeedbackset>.*' +
r'<targetedfeedbackset.*?>\s*</targetedfeedbackset>'
)
def test_targeted_feedback_multiple_answer_1(self):
problem = new_loncapa_problem(load_fixture('targeted_feedback_multiple.xml'))
problem.done = True
problem.student_answers = {'1_2_1': 'choice_0'} # feedback1
the_html = problem.get_html()
without_new_lines = the_html.replace("\n", "")
# Q1 has feedback1 and Q2 has nothing
self.assertRegexpMatches(
without_new_lines,
r'<targetedfeedbackset.*?>.*?explanation-id="feedback1".*?</targetedfeedbackset>.*' +
r'<targetedfeedbackset.*?>\s*</targetedfeedbackset>'
)
def test_targeted_feedback_multiple_answer_2(self):
problem = new_loncapa_problem(load_fixture('targeted_feedback_multiple.xml'))
problem.done = True
problem.student_answers = {'1_2_1': 'choice_0', '1_3_1': 'choice_2'} # Q1 wrong, Q2 correct
the_html = problem.get_html()
without_new_lines = the_html.replace("\n", "")
# Q1 has feedback1 and Q2 has feedbackC
self.assertRegexpMatches(
without_new_lines,
r'<targetedfeedbackset.*?>.*?explanation-id="feedback1".*?</targetedfeedbackset>.*' +
r'<targetedfeedbackset.*?>.*explanation-id="feedbackC".*?</targetedfeedbackset>'
)
| agpl-3.0 |
zouzhberk/ambaridemo | demo-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py | 1 | 5254 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Execute
from resource_management.core import shell
from resource_management.libraries.functions import format
from resource_management.libraries.functions.decorator import retry
def pre_upgrade_shutdown():
  """
  Ask the DataNode to shut itself down ("shutdownDatanode <ipc> upgrade") in
  preparation for an upgrade, then poll "getDatanodeInfo" until the DataNode
  has really stopped responding.

  Obtains a Kerberos ticket first when security is enabled.
  :return: True if the graceful path ran ok (even with errors); False when the
           caller needs to stop the DataNode forcefully.
  """
  import params
  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
  if params.security_enabled:
    Execute(params.dn_kinit_cmd, user=params.hdfs_user)

  shutdown_command = format('hdfs dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
  exit_code, command_output = shell.call(shutdown_command, user=params.hdfs_user)

  if exit_code != 0:
    # Due to bug HDFS-7533, DataNode may not always shutdown during rolling
    # upgrade, and it is necessary to kill it.
    if command_output is not None and re.search("Shutdown already in progress", command_output):
      Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(shutdown_command))
      return False
    return True

  # Command accepted: verify that the DataNode actually went down.
  _check_datanode_shutdown()
  return True
def post_upgrade_check():
  """
  Verify that the DataNode has come back up and rejoined the HDFS cluster
  after the upgrade. Obtains a Kerberos ticket first when security is enabled.
  :return:
  """
  import params
  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
  if params.security_enabled:
    Execute(params.dn_kinit_cmd, user=params.hdfs_user)

  # Poll "dfsadmin -report -live" until this host shows up (raises Fail if not).
  _check_datanode_startup()
@retry(times=24, sleep_time=5, err_class=Fail)
def _check_datanode_shutdown():
  """
  Checks that a DataNode is down by running "hdfs dfsadmin -getDatanodeInfo"
  several times, pausing in between runs (via the @retry decorator). Once the
  DataNode stops responding this method will return, otherwise it will raise
  a Fail(...) and retry automatically.
  The stack defaults for retrying for HDFS are also way too slow for this
  command; they are set to wait about 45 seconds between client retries. As
  a result, a single execution of dfsadmin will take 45 seconds to retry and
  the DataNode may be marked as dead, causing problems with HBase.
  https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
  times for ipc.client.connect.retry.interval. In the meantime, override them
  here, but only for RU.
  :return:
  """
  import params

  # override stock retry timeouts since after 30 seconds, the datanode is
  # marked as dead and can affect HBase during RU
  command = format('hdfs dfsadmin -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
  try:
    Execute(command, user=params.hdfs_user, tries=1)
  except Exception:
    # Fix: was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt. A failing getDatanodeInfo is the success condition:
    # the DataNode's IPC endpoint is no longer answering.
    Logger.info("DataNode has successfully shutdown for upgrade.")
    return

  Logger.info("DataNode has not shutdown.")
  raise Fail('DataNode has not shutdown.')
@retry(times=12, sleep_time=10, err_class=Fail)
def _check_datanode_startup():
  """
  Checks that a DataNode is reported as being alive via the
  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
  alive this method will return, otherwise it will raise a Fail(...) and retry
  automatically (via the @retry decorator).
  :return:
  """
  import params

  try:
    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
    command = 'hdfs dfsadmin -report -live'
    return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
  except Exception:
    # Fix: was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt; only wrap genuine command failures in Fail.
    raise Fail('Unable to determine if the DataNode has started after upgrade.')

  if return_code == 0:
    if params.hostname.lower() in hdfs_output.lower():
      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
      return
    else:
      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))

  # return_code is not 0, fail
  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))
| apache-2.0 |
spektom/incubator-airflow | airflow/contrib/hooks/jenkins_hook.py | 5 | 1139 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.jenkins.hooks.jenkins`."""
import warnings
# Re-export the moved class so old import paths keep working.
# pylint: disable=unused-import
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook  # noqa
# Warn once at import time; stacklevel=2 attributes the warning to the importer.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.jenkins.hooks.jenkins`.",
    DeprecationWarning, stacklevel=2
)
| apache-2.0 |
cyngus84/Ochre | ochre-app-engine-server/lib/itsdangerous.py | 626 | 31840 | # -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
# Python 2/3 compatibility shims used throughout the module.
PY2 = sys.version_info[0] == 2
if PY2:
    from itertools import izip
    text_type = unicode
    int_to_byte = chr
    number_types = (int, long, float)
else:
    from functools import reduce
    izip = zip
    text_type = str
    # Maps an int 0..255 to a single big-endian byte, e.g. 65 -> b'A'.
    int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
    number_types = (int, float)
# Prefer simplejson (faster, and what some deployments standardize on);
# fall back to the stdlib json module.
try:
    import simplejson as json
except ImportError:
    import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to bytes, encoding text values with *encoding*/*errors*."""
    if not isinstance(s, text_type):
        return s
    return s.encode(encoding, errors)
def is_text_serializer(serializer):
    """Checks whether a serializer generates text or binary
    (probes it by serializing an empty dict)."""
    return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a c-implementation for
# constant time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
def constant_time_compare(val1, val2):
    """Returns True if the two strings are equal, False otherwise.
    The time taken is independent of the number of characters that match. Do
    not use this function for anything else than comparison with known
    length targets.
    This should be implemented in C in order to get it completely right.
    """
    if _builtin_constant_time_compare is not None:
        return _builtin_constant_time_compare(val1, val2)
    len_eq = len(val1) == len(val2)
    if len_eq:
        result = 0
        left = val1
    else:
        # Lengths differ: compare val2 against itself so the loop still runs
        # for len(val2) steps, but force a non-zero (failing) result up front.
        result = 1
        left = val2
    # OR together the XOR of every byte pair; result stays 0 only on a
    # full match, and the work done does not depend on where a mismatch is.
    for x, y in izip(bytearray(left), bytearray(val2)):
        result |= x ^ y
    return result == 0
class BadData(Exception):
    """Raised if bad data of any sort was encountered. This is the
    base for all exceptions that itsdangerous is currently using.
    .. versionadded:: 0.15
    """
    # Human-readable description of what went wrong.
    message = None
    def __init__(self, message):
        Exception.__init__(self, message)
        self.message = message
    def __str__(self):
        return text_type(self.message)
    if PY2:
        # On Python 2, __str__ must return bytes; keep the text version
        # available as __unicode__ and encode it for __str__.
        __unicode__ = __str__
        def __str__(self):
            return self.__unicode__().encode('utf-8')
class BadPayload(BadData):
    """This error is raised in situations when payload is loaded without
    checking the signature first and an exception happened as a result of
    that. The original exception that caused that will be stored on the
    exception as :attr:`original_error`.
    This can also happen with a :class:`JSONWebSignatureSerializer` that
    is subclassed and uses a different serializer for the payload than
    the expected one.
    .. versionadded:: 0.15
    """
    def __init__(self, message, original_error=None):
        BadData.__init__(self, message)
        #: If available, the error that indicates why the payload
        #: was not valid. This might be `None`.
        self.original_error = original_error
class BadSignature(BadData):
    """This error is raised if a signature does not match. As of
    itsdangerous 0.14 there are helpful attributes on the exception
    instances. You can also catch down the baseclass :exc:`BadData`.
    """
    def __init__(self, message, payload=None):
        BadData.__init__(self, message)
        #: The payload that failed the signature test. In some
        #: situations you might still want to inspect this, even if
        #: you know it was tampered with.
        #:
        #: .. versionadded:: 0.14
        self.payload = payload
class BadTimeSignature(BadSignature):
    """Raised for time based signatures that fail. This is a subclass
    of :class:`BadSignature` so you can catch those down as well.
    """
    def __init__(self, message, payload=None, date_signed=None):
        BadSignature.__init__(self, message, payload)
        #: If the signature expired this exposes the date of when the
        #: signature was created. This can be helpful in order to
        #: tell the user how long a link has been gone stale.
        #:
        #: .. versionadded:: 0.14
        self.date_signed = date_signed
class BadHeader(BadSignature):
    """Raised if a signed header is invalid in some form. This only
    happens for serializers that have a header that goes with the
    signature.
    .. versionadded:: 0.24
    """
    def __init__(self, message, payload=None, header=None,
                 original_error=None):
        BadSignature.__init__(self, message, payload)
        #: If the header is actually available but just malformed it
        #: might be stored here.
        self.header = header
        #: If available, the error that indicates why the payload
        #: was not valid. This might be `None`.
        self.original_error = original_error
class SignatureExpired(BadTimeSignature):
    """Signature timestamp is older than required max_age. This is a
    subclass of :exc:`BadTimeSignature` so you can use the baseclass for
    catching the error.
    """
    # Marker subclass only: no extra state beyond BadTimeSignature.
def base64_encode(string):
    """base64 encodes a single bytestring (text input is encoded first).
    The result is URL-safe and has the trailing ``=`` padding stripped.
    """
    return base64.urlsafe_b64encode(want_bytes(string)).strip(b'=')
def base64_decode(string):
    """base64 decodes a single bytestring (text input is coerced to ASCII
    bytes first). Re-adds any ``=`` padding stripped by :func:`base64_encode`.
    The result is always a bytestring.
    """
    raw = want_bytes(string, encoding='ascii', errors='ignore')
    padding = b'=' * (-len(raw) % 4)
    return base64.urlsafe_b64decode(raw + padding)
def int_to_bytes(num):
    """Serialize a non-negative integer to big-endian bytes (b'' for 0)."""
    assert num >= 0
    chunks = []
    # Peel off the least-significant byte each round, then reverse.
    while num:
        chunks.append(int_to_byte(num & 0xff))
        num >>= 8
    chunks.reverse()
    return b''.join(chunks)
def bytes_to_int(bytestr):
    """Interpret *bytestr* as a big-endian unsigned integer (0 for b'')."""
    result = 0
    for byte in bytearray(bytestr):
        result = (result << 8) | byte
    return result
class SigningAlgorithm(object):
    """Subclasses of `SigningAlgorithm` have to implement `get_signature` to
    provide signature generation functionality.
    """
    def get_signature(self, key, value):
        """Returns the signature for the given key and value"""
        raise NotImplementedError()
    def verify_signature(self, key, value, sig):
        """Verifies the given signature matches the expected signature"""
        # Constant-time comparison so signature bytes can't leak via timing.
        return constant_time_compare(sig, self.get_signature(key, value))
class NoneAlgorithm(SigningAlgorithm):
    """Algorithm that performs no signing at all; the "signature" it
    produces is always the empty bytestring.
    """

    def get_signature(self, key, value):
        """Return the (empty) signature regardless of key and value."""
        return b''
class HMACAlgorithm(SigningAlgorithm):
    """Signature generation backed by HMAC."""

    #: The digest method to use with the MAC algorithm. This defaults to sha1
    #: but can be changed for any other function in the hashlib module.
    default_digest_method = staticmethod(hashlib.sha1)

    def __init__(self, digest_method=None):
        # Fall back to the class-level default when no digest is supplied.
        self.digest_method = (
            self.default_digest_method if digest_method is None else digest_method
        )

    def get_signature(self, key, value):
        """Return the HMAC digest of *value* under *key*."""
        return hmac.new(key, msg=value, digestmod=self.digest_method).digest()
class Signer(object):
    """This class can sign bytes and unsign it and validate the signature
    provided.
    Salt can be used to namespace the hash, so that a signed string is only
    valid for a given namespace. Leaving this at the default value or re-using
    a salt value across different parts of your application where the same
    signed value in one part can mean something different in another part
    is a security risk.
    See :ref:`the-salt` for an example of what the salt is doing and how you
    can utilize it.
    .. versionadded:: 0.14
        `key_derivation` and `digest_method` were added as arguments to the
        class constructor.
    .. versionadded:: 0.18
        `algorithm` was added as an argument to the class constructor.
    """
    #: The digest method to use for the signer. This defaults to sha1 but can
    #: be changed for any other function in the hashlib module.
    #:
    #: .. versionchanged:: 0.14
    default_digest_method = staticmethod(hashlib.sha1)
    #: Controls how the key is derived. The default is Django style
    #: concatenation. Possible values are ``concat``, ``django-concat``
    #: and ``hmac``. This is used for deriving a key from the secret key
    #: with an added salt.
    #:
    #: .. versionadded:: 0.14
    default_key_derivation = 'django-concat'
    def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
                 digest_method=None, algorithm=None):
        self.secret_key = want_bytes(secret_key)
        self.sep = sep
        # Default salt namespaces the derived key to this library.
        self.salt = 'itsdangerous.Signer' if salt is None else salt
        if key_derivation is None:
            key_derivation = self.default_key_derivation
        self.key_derivation = key_derivation
        if digest_method is None:
            digest_method = self.default_digest_method
        self.digest_method = digest_method
        # Default signing algorithm is an HMAC over digest_method.
        if algorithm is None:
            algorithm = HMACAlgorithm(self.digest_method)
        self.algorithm = algorithm
    def derive_key(self):
        """This method is called to derive the key. If you're unhappy with
        the default key derivation choices you can override them here.
        Keep in mind that the key derivation in itsdangerous is not intended
        to be used as a security method to make a complex key out of a short
        password. Instead you should use large random secret keys.
        """
        salt = want_bytes(self.salt)
        if self.key_derivation == 'concat':
            return self.digest_method(salt + self.secret_key).digest()
        elif self.key_derivation == 'django-concat':
            return self.digest_method(salt + b'signer' +
                                      self.secret_key).digest()
        elif self.key_derivation == 'hmac':
            mac = hmac.new(self.secret_key, digestmod=self.digest_method)
            mac.update(salt)
            return mac.digest()
        elif self.key_derivation == 'none':
            # No derivation; the raw secret key is used directly.
            return self.secret_key
        else:
            raise TypeError('Unknown key derivation method')
    def get_signature(self, value):
        """Returns the signature for the given value"""
        value = want_bytes(value)
        key = self.derive_key()
        sig = self.algorithm.get_signature(key, value)
        # Signatures are transported URL-safe base64 encoded, unpadded.
        return base64_encode(sig)
    def sign(self, value):
        """Signs the given string."""
        return value + want_bytes(self.sep) + self.get_signature(value)
    def verify_signature(self, value, sig):
        """Verifies the signature for the given value."""
        key = self.derive_key()
        try:
            sig = base64_decode(sig)
        except Exception:
            # A signature that cannot even be base64 decoded is invalid.
            return False
        return self.algorithm.verify_signature(key, value, sig)
    def unsign(self, signed_value):
        """Unsigns the given string."""
        signed_value = want_bytes(signed_value)
        sep = want_bytes(self.sep)
        if sep not in signed_value:
            raise BadSignature('No %r found in value' % self.sep)
        # rsplit: the payload may itself contain the separator.
        value, sig = signed_value.rsplit(sep, 1)
        if self.verify_signature(value, sig):
            return value
        raise BadSignature('Signature %r does not match' % sig,
                           payload=value)
    def validate(self, signed_value):
        """Just validates the given signed value. Returns `True` if the
        signature exists and is valid, `False` otherwise."""
        try:
            self.unsign(signed_value)
            return True
        except BadSignature:
            return False
class TimestampSigner(Signer):
    """Works like the regular :class:`Signer` but also records the time
    of the signing and can be used to expire signatures. The unsign
    method can raise a :exc:`SignatureExpired` exception if the unsigning
    failed because the signature is expired. This exception is a subclass
    of :exc:`BadSignature`.
    """
    def get_timestamp(self):
        """Returns the current timestamp. This implementation returns the
        seconds since 1/1/2011. The function must return an integer.
        """
        return int(time.time() - EPOCH)
    def timestamp_to_datetime(self, ts):
        """Used to convert the timestamp from `get_timestamp` into a
        datetime object.
        """
        return datetime.utcfromtimestamp(ts + EPOCH)
    def sign(self, value):
        """Signs the given string and also attaches a time information."""
        value = want_bytes(value)
        # The timestamp travels as a separate base64 encoded segment:
        # <value><sep><timestamp><sep><signature>
        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
        sep = want_bytes(self.sep)
        value = value + sep + timestamp
        return value + sep + self.get_signature(value)
    def unsign(self, value, max_age=None, return_timestamp=False):
        """Works like the regular :meth:`~Signer.unsign` but can also
        validate the time. See the base docstring of the class for
        the general behavior. If `return_timestamp` is set to `True`
        the timestamp of the signature will be returned as naive
        :class:`datetime.datetime` object in UTC.
        """
        try:
            result = Signer.unsign(self, value)
            sig_error = None
        except BadSignature as e:
            # Defer the error: we still want to split off the timestamp
            # so the exception can carry the payload and signing date.
            sig_error = e
            result = e.payload or b''
        sep = want_bytes(self.sep)
        # If there is no timestamp in the result there is something
        # seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation in which
        # we shouldn't have come except someone uses a time-based serializer
        # on non-timestamp data, so catch that.
        if sep not in result:
            if sig_error:
                raise sig_error
            raise BadTimeSignature('timestamp missing', payload=result)
        value, timestamp = result.rsplit(sep, 1)
        try:
            timestamp = bytes_to_int(base64_decode(timestamp))
        except Exception:
            timestamp = None
        # Signature is *not* okay. Raise a proper error now that we have
        # split the value and the timestamp.
        if sig_error is not None:
            raise BadTimeSignature(text_type(sig_error), payload=value,
                                   date_signed=timestamp)
        # Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but well. We handle it nonetheless
        if timestamp is None:
            raise BadTimeSignature('Malformed timestamp', payload=value)
        # Check timestamp is not older than max_age
        if max_age is not None:
            age = self.get_timestamp() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    'Signature age %s > %s seconds' % (age, max_age),
                    payload=value,
                    date_signed=self.timestamp_to_datetime(timestamp))
        if return_timestamp:
            return value, self.timestamp_to_datetime(timestamp)
        return value
    def validate(self, signed_value, max_age=None):
        """Just validates the given signed value. Returns `True` if the
        signature exists and is valid, `False` otherwise."""
        try:
            self.unsign(signed_value, max_age=max_age)
            return True
        except BadSignature:
            return False
class Serializer(object):
    """This class provides a serialization interface on top of the
    signer. It provides a similar API to json/pickle and other modules but is
    slightly differently structured internally. If you want to change the
    underlying implementation for parsing and loading you have to override the
    :meth:`load_payload` and :meth:`dump_payload` functions.
    This implementation uses simplejson if available for dumping and loading
    and will fall back to the standard library's json module if it's not
    available.
    Starting with 0.14 you do not need to subclass this class in order to
    switch out or customize the :class:`Signer`. You can instead also pass a
    different class to the constructor as well as keyword arguments as
    dictionary that should be forwarded::
        s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
    .. versionchanged:: 0.14:
        The `signer` and `signer_kwargs` parameters were added to the
        constructor.
    """
    #: If a serializer module or class is not passed to the constructor
    #: this one is picked up. This currently defaults to :mod:`json`.
    default_serializer = json
    #: The default :class:`Signer` class that is being used by this
    #: serializer.
    #:
    #: .. versionadded:: 0.14
    default_signer = Signer
    def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
                 signer=None, signer_kwargs=None):
        self.secret_key = want_bytes(secret_key)
        self.salt = want_bytes(salt)
        if serializer is None:
            serializer = self.default_serializer
        self.serializer = serializer
        # Remember whether the serializer emits text so payloads can be
        # encoded/decoded at the right boundaries.
        self.is_text_serializer = is_text_serializer(serializer)
        if signer is None:
            signer = self.default_signer
        self.signer = signer
        self.signer_kwargs = signer_kwargs or {}
    def load_payload(self, payload, serializer=None):
        """Loads the encoded object. This function raises :class:`BadPayload`
        if the payload is not valid. The `serializer` parameter can be used to
        override the serializer stored on the class. The encoded payload is
        always byte based.
        """
        if serializer is None:
            serializer = self.serializer
            is_text = self.is_text_serializer
        else:
            is_text = is_text_serializer(serializer)
        try:
            if is_text:
                payload = payload.decode('utf-8')
            return serializer.loads(payload)
        except Exception as e:
            # Any deserialization failure is normalized to BadPayload with
            # the original exception preserved for inspection.
            raise BadPayload('Could not load the payload because an '
                             'exception occurred on unserializing the data',
                             original_error=e)
    def dump_payload(self, obj):
        """Dumps the encoded object. The return value is always a
        bytestring. If the internal serializer is text based the value
        will automatically be encoded to utf-8.
        """
        return want_bytes(self.serializer.dumps(obj))
    def make_signer(self, salt=None):
        """A method that creates a new instance of the signer to be used.
        The default implementation uses the :class:`Signer` baseclass.
        """
        if salt is None:
            salt = self.salt
        return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
    def dumps(self, obj, salt=None):
        """Returns a signed string serialized with the internal serializer.
        The return value can be either a byte or unicode string depending
        on the format of the internal serializer.
        """
        payload = want_bytes(self.dump_payload(obj))
        rv = self.make_signer(salt).sign(payload)
        if self.is_text_serializer:
            rv = rv.decode('utf-8')
        return rv
    def dump(self, obj, f, salt=None):
        """Like :meth:`dumps` but dumps into a file. The file handle has
        to be compatible with what the internal serializer expects.
        """
        f.write(self.dumps(obj, salt))
    def loads(self, s, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
        signature validation fails.
        """
        s = want_bytes(s)
        return self.load_payload(self.make_signer(salt).unsign(s))
    def load(self, f, salt=None):
        """Like :meth:`loads` but loads from a file."""
        return self.loads(f.read(), salt)
    def loads_unsafe(self, s, salt=None):
        """Like :meth:`loads` but without verifying the signature. This is
        potentially very dangerous to use depending on how your serializer
        works. The return value is ``(signature_okay, payload)`` instead of
        just the payload. The first item will be a boolean that indicates
        if the signature is okay (``True``) or if it failed. This function
        never fails.
        Use it for debugging only and if you know that your serializer module
        is not exploitable (eg: do not use it with a pickle serializer).
        .. versionadded:: 0.15
        """
        return self._loads_unsafe_impl(s, salt)
    def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
                           load_payload_kwargs=None):
        """Lowlevel helper function to implement :meth:`loads_unsafe` in
        serializer subclasses.
        """
        try:
            return True, self.loads(s, salt=salt, **(load_kwargs or {}))
        except BadSignature as e:
            if e.payload is None:
                return False, None
            # The signature failed but a payload was recovered; decode it
            # anyway so callers can inspect the (untrusted!) data.
            try:
                return False, self.load_payload(e.payload,
                                                **(load_payload_kwargs or {}))
            except BadPayload:
                return False, None
    def load_unsafe(self, f, *args, **kwargs):
        """Like :meth:`loads_unsafe` but loads from a file.
        .. versionadded:: 0.15
        """
        return self.loads_unsafe(f.read(), *args, **kwargs)
class TimedSerializer(Serializer):
    """A :class:`Serializer` that signs with :class:`TimestampSigner`
    instead of the plain :class:`Signer`.
    """
    default_signer = TimestampSigner
    def loads(self, s, max_age=None, return_timestamp=False, salt=None):
        """Reverse of :meth:`dumps`.  Raises :exc:`BadSignature` when the
        signature is invalid and :exc:`SignatureExpired` (a subclass of
        :exc:`BadSignature`) when `max_age` seconds have elapsed since
        signing.  With `return_timestamp` the signing time is returned as
        a second value.
        """
        signer = self.make_signer(salt)
        base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True)
        payload = self.load_payload(base64d)
        if return_timestamp:
            return payload, timestamp
        return payload
    def loads_unsafe(self, s, max_age=None, salt=None):
        return self._loads_unsafe_impl(s, salt,
                                       load_kwargs={'max_age': max_age},
                                       load_payload_kwargs={})
class JSONWebSignatureSerializer(Serializer):
    """This serializer implements JSON Web Signature (JWS) support. Only
    supports the JWS Compact Serialization.
    """
    #: Supported JWS algorithms, keyed by the header ``alg`` value.
    jws_algorithms = {
        'HS256': HMACAlgorithm(hashlib.sha256),
        'HS384': HMACAlgorithm(hashlib.sha384),
        'HS512': HMACAlgorithm(hashlib.sha512),
        'none': NoneAlgorithm(),
    }
    #: The default algorithm to use for signature generation
    default_algorithm = 'HS256'
    default_serializer = compact_json
    def __init__(self, secret_key, salt=None, serializer=None,
                 signer=None, signer_kwargs=None, algorithm_name=None):
        Serializer.__init__(self, secret_key, salt, serializer,
                            signer, signer_kwargs)
        if algorithm_name is None:
            algorithm_name = self.default_algorithm
        self.algorithm_name = algorithm_name
        self.algorithm = self.make_algorithm(algorithm_name)
    def load_payload(self, payload, return_header=False):
        # Compact JWS is "<b64 header>.<b64 payload>"; the signature part
        # has already been stripped off by the signer at this point.
        payload = want_bytes(payload)
        if b'.' not in payload:
            raise BadPayload('No "." found in value')
        base64d_header, base64d_payload = payload.split(b'.', 1)
        try:
            json_header = base64_decode(base64d_header)
        except Exception as e:
            raise BadHeader('Could not base64 decode the header because of '
                            'an exception', original_error=e)
        try:
            json_payload = base64_decode(base64d_payload)
        except Exception as e:
            raise BadPayload('Could not base64 decode the payload because of '
                             'an exception', original_error=e)
        try:
            # Headers are always parsed with plain json, regardless of the
            # configured payload serializer.
            header = Serializer.load_payload(self, json_header,
                                             serializer=json)
        except BadData as e:
            raise BadHeader('Could not unserialize header because it was '
                            'malformed', original_error=e)
        if not isinstance(header, dict):
            raise BadHeader('Header payload is not a JSON object',
                            header=header)
        payload = Serializer.load_payload(self, json_payload)
        if return_header:
            return payload, header
        return payload
    def dump_payload(self, header, obj):
        base64d_header = base64_encode(self.serializer.dumps(header))
        base64d_payload = base64_encode(self.serializer.dumps(obj))
        return base64d_header + b'.' + base64d_payload
    def make_algorithm(self, algorithm_name):
        try:
            return self.jws_algorithms[algorithm_name]
        except KeyError:
            raise NotImplementedError('Algorithm not supported')
    def make_signer(self, salt=None, algorithm=None):
        if salt is None:
            salt = self.salt
        # NOTE(review): salt was just defaulted above, so this branch can
        # only select 'none' when self.salt is None as well -- confirm
        # that this is the intended behavior.
        key_derivation = 'none' if salt is None else None
        if algorithm is None:
            algorithm = self.algorithm
        return self.signer(self.secret_key, salt=salt, sep='.',
                           key_derivation=key_derivation, algorithm=algorithm)
    def make_header(self, header_fields):
        header = header_fields.copy() if header_fields else {}
        header['alg'] = self.algorithm_name
        return header
    def dumps(self, obj, salt=None, header_fields=None):
        """Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
        also allows for specifying additional fields to be included in the JWS
        Header.
        """
        header = self.make_header(header_fields)
        signer = self.make_signer(salt, self.algorithm)
        return signer.sign(self.dump_payload(header, obj))
    def loads(self, s, salt=None, return_header=False):
        """Reverse of :meth:`dumps`. If requested via `return_header` it will
        return a tuple of payload and header.
        """
        payload, header = self.load_payload(
            self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
            return_header=True)
        # Reject tokens whose declared algorithm differs from the one this
        # serializer is configured for.
        if header.get('alg') != self.algorithm_name:
            raise BadHeader('Algorithm mismatch', header=header,
                            payload=payload)
        if return_header:
            return payload, header
        return payload
    def loads_unsafe(self, s, salt=None, return_header=False):
        kwargs = {'return_header': return_header}
        return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
    """Works like the regular :class:`JSONWebSignatureSerializer` but also
    records the time of the signing and can be used to expire signatures.
    JWS currently does not specify this behavior but it mentions a possible
    extension like this in the spec. Expiry date is encoded into the header
    similarly as specified in `draft-ietf-oauth-json-web-token
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
    The unsign method can raise a :exc:`SignatureExpired` exception if the
    unsigning failed because the signature is expired. This exception is a
    subclass of :exc:`BadSignature`.
    """
    #: Default signature lifetime in seconds (one hour).
    DEFAULT_EXPIRES_IN = 3600
    def __init__(self, secret_key, expires_in=None, **kwargs):
        JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
        if expires_in is None:
            expires_in = self.DEFAULT_EXPIRES_IN
        self.expires_in = expires_in
    def make_header(self, header_fields):
        header = JSONWebSignatureSerializer.make_header(self, header_fields)
        # Issued-at ("iat") and expiry ("exp") are stored in the header as
        # unix timestamps.
        iat = self.now()
        exp = iat + self.expires_in
        header['iat'] = iat
        header['exp'] = exp
        return header
    def loads(self, s, salt=None, return_header=False):
        payload, header = JSONWebSignatureSerializer.loads(
            self, s, salt, return_header=True)
        if 'exp' not in header:
            raise BadSignature('Missing expiry date', payload=payload)
        if not (isinstance(header['exp'], number_types)
                and header['exp'] > 0):
            raise BadSignature('expiry date is not an IntDate',
                               payload=payload)
        if header['exp'] < self.now():
            raise SignatureExpired('Signature expired', payload=payload,
                                   date_signed=self.get_issue_date(header))
        if return_header:
            return payload, header
        return payload
    def get_issue_date(self, header):
        # Returns None when "iat" is absent or not numeric.
        rv = header.get('iat')
        if isinstance(rv, number_types):
            return datetime.utcfromtimestamp(int(rv))
    def now(self):
        return int(time.time())
class URLSafeSerializerMixin(object):
    """Serializer mixin that base64 encodes the payload for safe use in
    URLs and transparently zlib compresses it whenever that makes the
    value shorter.  A leading ``.`` marks a compressed payload.
    """
    def load_payload(self, payload):
        decompress = payload.startswith(b'.')
        if decompress:
            payload = payload[1:]
        try:
            data = base64_decode(payload)
        except Exception as e:
            raise BadPayload('Could not base64 decode the payload because of '
                             'an exception', original_error=e)
        if decompress:
            try:
                data = zlib.decompress(data)
            except Exception as e:
                raise BadPayload('Could not zlib decompress the payload before '
                                 'decoding the payload', original_error=e)
        return super(URLSafeSerializerMixin, self).load_payload(data)
    def dump_payload(self, obj):
        data = super(URLSafeSerializerMixin, self).dump_payload(obj)
        compressed = zlib.compress(data)
        # Only keep the compressed form when it is actually shorter
        # (accounting for the extra '.' marker byte).
        is_compressed = len(compressed) < len(data) - 1
        if is_compressed:
            data = compressed
        base64d = base64_encode(data)
        if is_compressed:
            base64d = b'.' + base64d
        return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
    """A :class:`Serializer` whose output is restricted to URL-safe
    characters: letters, digits, ``'_'``, ``'-'`` and ``'.'``.
    """
    default_serializer = compact_json
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
    """A :class:`TimedSerializer` whose output is restricted to URL-safe
    characters: letters, digits, ``'_'``, ``'-'`` and ``'.'``.
    """
    default_serializer = compact_json
| apache-2.0 |
hiteshgarg14/openstates | scrapers/id/committees.py | 2 | 6181 | """Scrapes Idaho committees for the latest term."""
from openstates.scrape import Scraper, Organization
import lxml.html
# URL templates for the Idaho legislature site; the %s slot takes the
# chamber name ("house" or "senate").
_COMMITTEE_URL = (
    "https://legislature.idaho.gov/committees/%scommittees/"  # house/senate
)
_JOINT_URL = "https://legislature.idaho.gov/sessioninfo/2017/joint/"
# openstates chamber id -> site chamber name, and the reverse mapping.
_CHAMBERS = {"upper": "senate", "lower": "house"}
_REV_CHAMBERS = {"senate": "upper", "house": "lower"}
def clean_name(name):
    """Replace non-breaking spaces (common in scraped HTML) with plain
    spaces."""
    return name.replace(u"\xa0", " ")
class IDCommitteeScraper(Scraper):
    """Scrapes Idaho house, senate and joint committees."""
    def get_joint_committees_data(self, name, url):
        """Scrape the membership page of joint committee *name* at *url*
        and return it as an Organization.
        NOTE(review): the xpaths below are tied to the current markup of
        legislature.idaho.gov -- re-verify them whenever the site changes.
        """
        page = self.get(url).text
        html = lxml.html.fromstring(page)
        org = Organization(name=name, chamber="legislature", classification="committee")
        table = html.xpath("//section[@class=' row-equal-height no-padding']")
        for td in table:
            # First column: senate members, rendered as "Sen. Name, Role".
            senate_members = td.xpath("div[1]/div/div/div[2]/div/p/strong")
            if len(senate_members) > 0:
                member_string = list(senate_members[0].itertext())
                if len(member_string) > 1:
                    # Name and role arrive as separate text chunks.
                    name = member_string[0]
                    role = member_string[1]
                    for ch in ["Sen.", ",", u"\u00a0"]:
                        name = name.replace(ch, " ").strip()
                        role = role.replace(ch, " ").strip()
                    org.add_member(name, role=role)
                else:
                    name = member_string[0].replace("Sen.", " ").strip()
                    for ch in ["Sen.", ",", u"\u00a0"]:
                        name = name.replace(ch, " ").strip()
                    org.add_member(name)
            # Second column: house members, rendered as "Rep. Name, Role".
            house_members = list(td.xpath("div[2]/div/div/div[2]/div/p/strong"))
            if len(house_members) > 0:
                member_string = list(house_members[0].itertext())
                if len(member_string) > 1:
                    name = member_string[0].replace("Rep.", " ").strip()
                    role = member_string[1].replace(",", " ").strip()
                    for ch in ["Rep.", ",", u"\u00a0"]:
                        name = name.replace(ch, " ").strip()
                        role = role.replace(ch, " ").strip()
                    org.add_member(name, role=role)
                else:
                    name = member_string[0].replace("Rep.", " ").strip()
                    for ch in ["Rep.", ",", u"\u00a0"]:
                        name = name.replace(ch, " ").strip()
                    org.add_member(name)
        org.add_source(url)
        return org
    def scrape_committees(self, chamber):
        """Yield Organizations for the standing committees of *chamber*
        ("upper" or "lower")."""
        url = _COMMITTEE_URL % _CHAMBERS[chamber]
        page = self.get(url).text
        html = lxml.html.fromstring(page)
        table = html.xpath("body/section[2]/div/div/section[2]/div[2]/div/div/div/div")
        for row in table[1:]:
            # committee name, description, hours of operation,
            # secretary and office_phone
            text = list(row[0].xpath("div")[0].itertext())
            # NOTE(review): values are coerced to ascii bytes here and
            # decoded again below -- presumably to drop stray unicode;
            # confirm before simplifying.
            attributes = [
                list(
                    value.replace(u"\xa0", " ")
                    .replace("Secretary:", "")
                    .encode("ascii", "ignore")
                    for value in text
                    if "Email:" not in value and value != "\n" and "Phone:" not in value
                )
            ]
            for i in range(len(attributes[0])):
                if "Room" in str(attributes[0][i]):
                    # Keep only the part before the room number.
                    attributes[0][i] = (
                        str(attributes[0][i]).split("Room")[0].replace(", ", " ")
                    )
            org = Organization(
                chamber=chamber,
                classification="committee",
                name=str(attributes[0][0].decode()),
            )
            # Longer rows carry an extra field before the email/phone pair,
            # shifting their indices by one.
            if len(attributes[0]) > 5:
                org.add_contact_detail(
                    type="email",
                    value=str(attributes[0][4].decode()),
                    note="District Office",
                )
                org.add_contact_detail(
                    type="voice",
                    value=str(attributes[0][5].decode()),
                    note="District Office",
                )
            else:
                org.add_contact_detail(
                    type="email",
                    value=str(attributes[0][3].decode()),
                    note="District Office",
                )
                org.add_contact_detail(
                    type="voice",
                    value=str(attributes[0][4].decode()),
                    note="District Office",
                )
            org.add_source(url)
            # membership
            td_text = list()
            for td in row[1].xpath("div") + row[2].xpath("div"):
                td_text += td.itertext()
            members = list(
                value
                for value in td_text
                if value != " " and value != "\n" and value != ","
            )
            role = "member"
            for member in members:
                # A "Chair"/"Vice Chair" entry applies to the member name
                # that immediately follows it in the list.
                if member in ["Chair", "Vice Chair"]:
                    role = member.lower()
                    continue
                elif member.strip():
                    org.add_member(member.strip(), role=role)
                role = "member"
            yield org
    def scrape_joint_committees(self):
        """Yield Organizations for every joint committee linked from the
        session info page."""
        page = self.get(_JOINT_URL).text
        html = lxml.html.fromstring(page)
        html.make_links_absolute(_JOINT_URL)
        joint_li = html.xpath('//div[contains(h2, "Joint")]/ul/li')
        for li in joint_li:
            name, url = li[0].text, li[0].get("href")
            yield self.get_joint_committees_data(name, url)
    def scrape(self, chamber=None):
        """
        Scrapes Idaho committees for the latest term.
        """
        # self.validate_term(term, latest_only=True)
        if chamber in ["upper", "lower"]:
            yield from self.scrape_committees(chamber)
        elif chamber == "joint":
            yield from self.scrape_joint_committees()
        else:
            # No chamber given: scrape everything.
            yield from self.scrape_committees("upper")
            yield from self.scrape_committees("lower")
            yield from self.scrape_joint_committees()
scripnichenko/nova | nova/tests/unit/api/openstack/compute/test_server_reset_state.py | 35 | 5107 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import admin_actions \
as admin_actions_v21
from nova.api.openstack.compute.legacy_v2.contrib import admin_actions \
as admin_actions_v2
from nova.compute import vm_states
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
class ResetStateTestsV21(test.NoDBTestCase):
    """Tests for the os-resetState server action (v2.1 API)."""
    admin_act = admin_actions_v21
    bad_request = exception.ValidationError
    def setUp(self):
        super(ResetStateTestsV21, self).setUp()
        self.uuid = uuidutils.generate_uuid()
        self.admin_api = self.admin_act.AdminActionsController()
        self.compute_api = self.admin_api.compute_api
        self.request = self._get_request()
        self.context = self.request.environ['nova.context']
    def _get_request(self):
        return fakes.HTTPRequest.blank('')
    def test_no_state(self):
        # A null action body must be rejected.
        self.assertRaises(self.bad_request,
                          self.admin_api._reset_state,
                          self.request, self.uuid,
                          body={"os-resetState": None})
    def test_bad_state(self):
        # "spam" is not an allowed target state.
        self.assertRaises(self.bad_request,
                          self.admin_api._reset_state,
                          self.request, self.uuid,
                          body={"os-resetState": {"state": "spam"}})
    def test_no_instance(self):
        # An unknown instance uuid maps to HTTP 404.
        self.mox.StubOutWithMock(self.compute_api, 'get')
        exc = exception.InstanceNotFound(instance_id='inst_ud')
        self.compute_api.get(self.context, self.uuid, expected_attrs=None,
                             want_objects=True).AndRaise(exc)
        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.admin_api._reset_state,
                          self.request, self.uuid,
                          body={"os-resetState": {"state": "active"}})
    def _setup_mock(self, expected):
        """Stub compute_api.get()/instance.save() and verify that save()
        is invoked with exactly the *expected* field changes."""
        instance = objects.Instance()
        instance.uuid = self.uuid
        instance.vm_state = 'fake'
        instance.task_state = 'fake'
        instance.obj_reset_changes()
        self.mox.StubOutWithMock(instance, 'save')
        self.mox.StubOutWithMock(self.compute_api, 'get')
        # Side-effect hook run when instance.save() is called by the API.
        def check_state(admin_state_reset=True):
            self.assertEqual(set(expected.keys()),
                             instance.obj_what_changed())
            for k, v in expected.items():
                self.assertEqual(v, getattr(instance, k),
                                 "Instance.%s doesn't match" % k)
            instance.obj_reset_changes()
        self.compute_api.get(self.context, instance.uuid, expected_attrs=None,
                             want_objects=True).AndReturn(instance)
        instance.save(admin_state_reset=True).WithSideEffects(check_state)
    def test_reset_active(self):
        self._setup_mock(dict(vm_state=vm_states.ACTIVE,
                              task_state=None))
        self.mox.ReplayAll()
        body = {"os-resetState": {"state": "active"}}
        result = self.admin_api._reset_state(self.request, self.uuid,
                                             body=body)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.admin_api,
                      admin_actions_v21.AdminActionsController):
            status_int = self.admin_api._reset_state.wsgi_code
        else:
            status_int = result.status_int
        self.assertEqual(202, status_int)
    def test_reset_error(self):
        self._setup_mock(dict(vm_state=vm_states.ERROR,
                              task_state=None))
        self.mox.ReplayAll()
        body = {"os-resetState": {"state": "error"}}
        result = self.admin_api._reset_state(self.request, self.uuid,
                                             body=body)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.admin_api,
                      admin_actions_v21.AdminActionsController):
            status_int = self.admin_api._reset_state.wsgi_code
        else:
            status_int = result.status_int
        self.assertEqual(202, status_int)
class ResetStateTestsV2(ResetStateTestsV21):
    """Runs the same reset-state tests against the legacy v2 admin actions
    extension, which reports validation failures as HTTPBadRequest instead
    of ValidationError."""
    admin_act = admin_actions_v2
    bad_request = webob.exc.HTTPBadRequest
| apache-2.0 |
mateon1/servo | tests/wpt/css-tests/tools/pytest/testing/test_conftest.py | 171 | 14245 | from textwrap import dedent
import _pytest._code
import py
import pytest
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(request, tmpdir_factory):
    # Build a small tree with nested conftest files; the "inpackage"
    # variant additionally turns the directories into real packages.
    from _pytest.tmpdir import tmpdir as tmpdir_fixture
    basetmp = tmpdir_fixture(request, tmpdir_factory)
    basetmp.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
    basetmp.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
    if request.param == "inpackage":
        basetmp.ensure("adir/__init__.py")
        basetmp.ensure("adir/b/__init__.py")
    return basetmp
def ConftestWithSetinitial(path):
    """Return a PytestPluginManager whose initial conftests were collected
    from *path*."""
    manager = PytestPluginManager()
    conftest_setinitial(manager, [path])
    return manager
def conftest_setinitial(conftest, args, confcutdir=None):
    """Feed *args* (paths) to *conftest* as if parsed from the command
    line.  Only the attributes read by ``_set_initial_conftests`` are
    provided on the stand-in namespace object.
    """
    class FakeNamespace:
        def __init__(self):
            self.file_or_dir = args
            # NOTE(review): str(None) yields the string "None" when no
            # confcutdir is given -- kept as-is since existing callers
            # rely on the historical behavior.
            self.confcutdir = str(confcutdir)
            self.noconftest = False
    conftest._set_initial_conftests(FakeNamespace())
class TestConftestValueAccessGlobal:
    """Lookup of conftest values relative to different directories."""
    def test_basic_init(self, basedir):
        conftest = PytestPluginManager()
        p = basedir.join("adir")
        assert conftest._rget_with_confmod("a", p)[1] == 1
    def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
        conftest = PytestPluginManager()
        len(conftest._path2confmods)
        conftest._getconftestmodules(basedir)
        snap1 = len(conftest._path2confmods)
        #assert len(conftest._path2confmods) == snap1 + 1
        # Each newly visited directory adds exactly one cache entry.
        conftest._getconftestmodules(basedir.join('adir'))
        assert len(conftest._path2confmods) == snap1 + 1
        conftest._getconftestmodules(basedir.join('b'))
        assert len(conftest._path2confmods) == snap1 + 2
    def test_value_access_not_existing(self, basedir):
        # "a" is only defined below basedir/adir, not at basedir itself.
        conftest = ConftestWithSetinitial(basedir)
        with pytest.raises(KeyError):
            conftest._rget_with_confmod('a', basedir)
    def test_value_access_by_path(self, basedir):
        # The innermost conftest wins: adir/b overrides "a" with 1.5.
        conftest = ConftestWithSetinitial(basedir)
        adir = basedir.join("adir")
        assert conftest._rget_with_confmod("a", adir)[1] == 1
        assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5
    def test_value_access_with_confmod(self, basedir):
        startdir = basedir.join("adir", "b")
        startdir.ensure("xx", dir=True)
        conftest = ConftestWithSetinitial(startdir)
        mod, value = conftest._rget_with_confmod("a", startdir)
        assert value == 1.5
        # The returned module must be the conftest of adir/b itself.
        path = py.path.local(mod.__file__)
        assert path.dirpath() == basedir.join("adir", "b")
        assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
    # Same nested layout as the "inpackage" basedir fixture, but the root
    # directory carries a version suffix ("adir-1.0").
    tmpdir.ensure("adir-1.0/__init__.py")
    tmpdir.ensure("adir-1.0/b/__init__.py")
    tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
    ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
def test_doubledash_considered(testdir):
    # A directory whose name looks like an option ("--option") must still
    # be treated as a conftest location, and duplicates must be merged.
    optdir = testdir.mkdir("--option")
    optdir.join("conftest.py").ensure()
    manager = PytestPluginManager()
    conftest_setinitial(manager, [optdir.basename, optdir.basename])
    mods = manager._getconftestmodules(optdir)
    assert len(mods) == 1
def test_issue151_load_all_conftests(testdir):
    # Every directory given on the command line contributes its conftest.
    names = ["code", "proj", "src"]
    for name in names:
        testdir.mkdir(name).ensure("conftest.py")
    manager = PytestPluginManager()
    conftest_setinitial(manager, names)
    loaded = list(manager._conftestpath2mod.values())
    assert len(loaded) == len(names)
def test_conftest_global_import(testdir):
    # The generated script imports its own conftest and checks that the
    # plugin manager returns the very module object that a plain "import
    # conftest" yields, per conftest file.
    testdir.makeconftest("x=3")
    p = testdir.makepyfile("""
        import py, pytest
        from _pytest.config import PytestPluginManager
        conf = PytestPluginManager()
        mod = conf._importconftest(py.path.local("conftest.py"))
        assert mod.x == 3
        import conftest
        assert conftest is mod, (conftest, mod)
        subconf = py.path.local().ensure("sub", "conftest.py")
        subconf.write("y=4")
        mod2 = conf._importconftest(subconf)
        assert mod != mod2
        assert mod2.y == 4
        import conftest
        assert conftest is mod2, (conftest, mod)
    """)
    res = testdir.runpython(p)
    assert res.ret == 0
def test_conftestcutdir(testdir):
    """confcutdir suppresses automatic conftest pickup above the cutoff,
    but an explicit import still registers the module for all sub paths."""
    conf = testdir.makeconftest("")
    cutdir = testdir.mkdir("x")
    plugin_manager = PytestPluginManager()
    conftest_setinitial(plugin_manager, [testdir.tmpdir], confcutdir=cutdir)
    # Nothing above the cutoff is collected automatically.
    assert len(plugin_manager._getconftestmodules(cutdir)) == 0
    assert len(plugin_manager._getconftestmodules(conf.dirpath())) == 0
    assert conf not in plugin_manager._conftestpath2mod
    # but we can still import a conftest directly
    plugin_manager._importconftest(conf)
    mods = plugin_manager._getconftestmodules(conf.dirpath())
    assert mods[0].__file__.startswith(str(conf))
    # and all sub paths get updated properly
    mods = plugin_manager._getconftestmodules(cutdir)
    assert len(mods) == 1
    assert mods[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
    """A conftest located exactly at the confcutdir is still collected."""
    conf = testdir.makeconftest("")
    plugin_manager = PytestPluginManager()
    conftest_setinitial(plugin_manager, [conf.dirpath()],
                        confcutdir=conf.dirpath())
    mods = plugin_manager._getconftestmodules(conf.dirpath())
    assert len(mods) == 1
    assert mods[0].__file__.startswith(str(conf))
@pytest.mark.parametrize("name", ['test', 'tests', 'whatever', '.dotdir'])
def test_setinitial_conftest_subdirs(testdir, name):
    """At startup only well-known test-directory names are scanned for
    conftests; arbitrary and dot-prefixed directories are skipped."""
    sub = testdir.mkdir(name)
    subconftest = sub.ensure("conftest.py")
    plugin_manager = PytestPluginManager()
    conftest_setinitial(plugin_manager, [sub.dirpath()],
                        confcutdir=testdir.tmpdir)
    if name in ('whatever', '.dotdir'):
        assert subconftest not in plugin_manager._conftestpath2mod
        assert len(plugin_manager._conftestpath2mod) == 0
    else:
        assert subconftest in plugin_manager._conftestpath2mod
        assert len(plugin_manager._conftestpath2mod) == 1
def test_conftest_confcutdir(testdir):
    """A broken conftest above --confcutdir must not even be imported:
    this one would abort at import time with `assert 0`."""
    testdir.makeconftest("assert 0")
    x = testdir.mkdir("x")
    x.join("conftest.py").write(_pytest._code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    # With the cutoff at x, only x's conftest is active: its --xyz option
    # appears in help output and no load warning is emitted for the root.
    result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
    result.stdout.fnmatch_lines(["*--xyz*"])
    assert 'warning: could not load initial' not in result.stdout.str()
def test_no_conftest(testdir):
    """--noconftest skips conftest loading entirely; without the flag the
    broken conftest turns the run into a usage error."""
    testdir.makeconftest("assert 0")
    assert testdir.runpytest("--noconftest").ret == EXIT_NOTESTSCOLLECTED
    assert testdir.runpytest().ret == EXIT_USAGEERROR
def test_conftest_existing_resultlog(testdir):
    """Help output (-h) still works when --resultlog names an existing file."""
    tests_dir = testdir.mkdir("tests")
    tests_dir.join("conftest.py").write(_pytest._code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".log", result="")  # Writes result.log
    result = testdir.runpytest("-h", "--resultlog", "result.log")
    result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_existing_junitxml(testdir):
    """Help output (-h) still works when --junitxml names an existing file."""
    tests_dir = testdir.mkdir("tests")
    tests_dir.join("conftest.py").write(_pytest._code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".xml", junit="")  # Writes junit.xml
    result = testdir.runpytest("-h", "--junitxml", "junit.xml")
    result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
    """Conftest modules are returned outermost-first (root before sub)."""
    ct1 = testdir.makeconftest("")
    sub = testdir.mkdir("sub")
    ct2 = sub.join("conftest.py")
    ct2.write("")

    def impct(p):
        # Stub importer: return the path unmodified instead of importing,
        # so the "modules" list directly reveals the lookup order.
        return p

    conftest = PytestPluginManager()
    monkeypatch.setattr(conftest, '_importconftest', impct)
    assert conftest._getconftestmodules(sub) == [ct1, ct2]
def test_fixture_dependency(testdir, monkeypatch):
    """A fixture overriding a same-named outer fixture is used by tests
    below it, and unrelated outer conftest fixtures are never instantiated.

    Fix: the original bound ``testdir.makeconftest("")`` to ``ct1`` and then
    immediately rebound ``ct1`` to a different file — a dead binding that
    obscured the fact makeconftest() is called only for its side effect.
    """
    # Root conftest created for its side effect only (an empty conftest.py).
    testdir.makeconftest("")
    ct1 = testdir.makepyfile("__init__.py")
    ct1.write("")
    sub = testdir.mkdir("sub")
    sub.join("__init__.py").write("")
    # Outer conftest: `bar` depends on `foo`; both would fail if evaluated.
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        import pytest
        @pytest.fixture
        def not_needed():
            assert False, "Should not be called!"
        @pytest.fixture
        def foo():
            assert False, "Should not be called!"
        @pytest.fixture
        def bar(foo):
            return 'bar'
    """))
    subsub = sub.mkdir("subsub")
    subsub.join("__init__.py").write("")
    # Inner `bar` shadows the outer one, so neither foo nor bar above runs.
    subsub.join("test_bar.py").write(py.std.textwrap.dedent("""
        import pytest
        @pytest.fixture
        def bar():
            return 'sub bar'
        def test_event_fixture(bar):
            assert bar == 'sub bar'
    """))
    result = testdir.runpytest("sub")
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_found_with_double_dash(testdir):
    """Running a node id ('path::test') still discovers the conftest of the
    file's directory, so its custom option shows up in --help output."""
    sub = testdir.mkdir("sub")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        def pytest_addoption(parser):
            parser.addoption("--hello-world", action="store_true")
    """))
    test_file = sub.join("test_hello.py")
    test_file.write(py.std.textwrap.dedent("""
        import pytest
        def test_hello(found):
            assert found == 1
    """))
    result = testdir.runpytest(str(test_file) + "::test_hello", "-h")
    result.stdout.fnmatch_lines("""
        *--hello-world*
    """)
class TestConftestVisibility:
    """Fixture visibility must depend only on the target path, not on the
    directory pytest is invoked from (issue #616)."""

    def _setup_tree(self, testdir):  # for issue616
        # example mostly taken from:
        # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
        runner = testdir.mkdir("empty")
        package = testdir.mkdir("package")
        package.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-package"
        """))
        package.join("test_pkgroot.py").write(dedent("""\
            def test_pkgroot(fxtr):
                assert fxtr == "from-package"
        """))
        swc = package.mkdir("swc")
        swc.join("__init__.py").ensure()
        swc.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-swc"
        """))
        swc.join("test_with_conftest.py").write(dedent("""\
            def test_with_conftest(fxtr):
                assert fxtr == "from-swc"
        """))
        snc = package.mkdir("snc")
        snc.join("__init__.py").ensure()
        snc.join("test_no_conftest.py").write(dedent("""\
            def test_no_conftest(fxtr):
                assert fxtr == "from-package"  # No local conftest.py, so should
                # use value from parent dir's
        """))
        # Print the tree for easier debugging of parametrized failures.
        print ("created directory structure:")
        for x in testdir.tmpdir.visit():
            print (" " + x.relto(testdir.tmpdir))
        return {
            "runner": runner,
            "package": package,
            "swc": swc,
            "snc": snc}

    # N.B.: "swc" stands for "subdir with conftest.py"
    # "snc" stands for "subdir no [i.e. without] conftest.py"
    @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [
        # Effective target: package/..
        ("runner", "..", 3),
        ("package", "..", 3),
        ("swc", "../..", 3),
        ("snc", "../..", 3),
        # Effective target: package
        ("runner", "../package", 3),
        ("package", ".", 3),
        ("swc", "..", 3),
        ("snc", "..", 3),
        # Effective target: package/swc
        ("runner", "../package/swc", 1),
        ("package", "./swc", 1),
        ("swc", ".", 1),
        ("snc", "../swc", 1),
        # Effective target: package/snc
        ("runner", "../package/snc", 1),
        ("package", "./snc", 1),
        ("swc", "../snc", 1),
        ("snc", ".", 1),
    ])
    @pytest.mark.issue616
    def test_parsefactories_relative_node_ids(
            self, testdir, chdir, testarg, expect_ntests_passed):
        """Run the same relative target from different cwds: the outcome
        must match the effective target directory, not the cwd."""
        dirs = self._setup_tree(testdir)
        print("pytest run in cwd: %s" % (
            dirs[chdir].relto(testdir.tmpdir)))
        print("pytestarg : %s" % (testarg))
        print("expected pass : %s" % (expect_ntests_passed))
        with dirs[chdir].as_cwd():
            reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
            reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize('confcutdir,passed,error', [
    ('.', 2, 0),      # cutoff at rootdir: both conftests load, all pass
    ('src', 1, 1),    # cutoff at src: outer fixture unreachable -> 1 error
    (None, 1, 1),     # default: ini file in src acts as the cutoff
])
def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
    """Test that conftest files are detected only up to a ini file, unless
    an explicit --confcutdir option is given.
    """
    root = testdir.tmpdir
    src = root.join('src').ensure(dir=1)
    src.join('pytest.ini').write('[pytest]')
    src.join('conftest.py').write(_pytest._code.Source("""
        import pytest
        @pytest.fixture
        def fix1(): pass
    """))
    src.join('test_foo.py').write(_pytest._code.Source("""
        def test_1(fix1):
            pass
        def test_2(out_of_reach):
            pass
    """))
    # The fixture above the ini file: reachable only when the cutoff is
    # moved up to the root.
    root.join('conftest.py').write(_pytest._code.Source("""
        import pytest
        @pytest.fixture
        def out_of_reach(): pass
    """))
    args = [str(src)]
    if confcutdir:
        # NOTE(review): this *replaces* the positional src argument rather
        # than appending the option; collection then starts from the cwd.
        # The expected counts match, so it appears intentional — confirm.
        args = ['--confcutdir=%s' % root.join(confcutdir)]
    result = testdir.runpytest(*args)
    match = ''
    if passed:
        match += '*%d passed*' % passed
    if error:
        match += '*%d error*' % error
    result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(testdir):
    """Collection must not trip over conftest globals whose __getattr__
    raises (issue #1073)."""
    testdir.makeconftest("""
        class DontTouchMe:
            def __getattr__(self, x):
                raise Exception('cant touch me')
        x = DontTouchMe()
    """)
    testdir.makepyfile("""
        def test_some():
            pass
    """)
    outcome = testdir.runpytest()
    assert outcome.ret == 0
| mpl-2.0 |
azoft-dev-team/imagrium | libs/sikuli/Region.py | 2 | 3688 | # Copyright 2010-2013, Sikuli.org
# Released under the MIT License.
# modified RaiMan 2013
from org.sikuli.basics import Debug
from org.sikuli.script import Region as JRegion
from org.sikuli.script import ObserverCallBack
from org.sikuli.script.Constants import *
import sys
import inspect

# Module-wide switch for tracing the with-statement bookkeeping in Region.
DEBUG = False
class Region(JRegion):
    """Jython-side wrapper around the Java Region class.

    Adds `with`-statement support (temporarily exposing this region's
    methods as globals of the __main__ module) and Python-friendly
    wrappers for wait(), text() and the observe callbacks.

    NOTE: this is Jython/Python-2 code (print statements, dict.has_key).
    """
    # support for with:
    # override all global sikuli functions by this region's methods.
    def __enter__(self):
        # ROI is excluded because it must keep its global meaning.
        exclude_list = [ 'ROI' ]
        if DEBUG: print "with: entering *****", self
        # Remember the globals we shadow so __exit__ can restore them.
        self._global_funcs = {}
        dict = sys.modules['__main__'].__dict__
        for name in dir(self):
            if name in exclude_list: continue
            try:
                # Only bound methods are exported; attribute access on the
                # underlying Java object may raise, hence the blanket except.
                if not inspect.ismethod(getattr(self,name)):
                    continue
            except:
                continue
            if dict.has_key(name):
                # Save the previous global before overriding it.
                self._global_funcs[name] = dict[name]
                if DEBUG and name == 'checkWith': print "with: save %s ( %s )"%(name, str(dict[name])[1:])
            # Bind the global name to this region's method.
            dict[name] = eval("self."+name)
            if DEBUG and name == 'checkWith': print "with: is now: %s"%(str(dict[name])[1:])
        return self

    def __exit__(self, type, value, traceback):
        """Restore every global that __enter__ shadowed."""
        if DEBUG: print "with: exiting ****", self
        dict = sys.modules['__main__'].__dict__
        for name in self._global_funcs.keys():
            dict[name] = self._global_funcs[name]
            if DEBUG and name == 'checkWith':
                print "with restore: %s"%(str(dict[name])[1:])
        self._global_funcs = None

    #######################################################################
    #---- SIKULI PUBLIC API
    #######################################################################

    # Python wait() needs to be here because Java Object has a final method:
    # wait(long timeout). If we want to let Sikuli users use
    # wait(int/long timeout), we need this Python method.
    def wait(self, target, timeout=None):
        # Integer targets mean "sleep this many seconds": coerce to float so
        # the Java overload for a plain timeout is selected.
        if isinstance(target, int) or isinstance(target, long):
            target = float(target)
        if timeout == None:
            return JRegion.wait(self, target)
        else:
            return JRegion.wait(self, target, timeout)

    # the new Region.text() feature (Tesseract 3) returns utf8
    def text(self):
        return JRegion.text(self).encode("utf8")

    # observe(): Special setup for Jython
    # assures, that in any case the same region object is used
    def onAppear(self, target, handler = None):
        """Register `handler` to fire when `target` appears; no handler
        clears/uses the default Java-side behaviour."""
        if not handler:
            return self.onAppearJ(target, None)
        class AnonyObserver(ObserverCallBack):
            def appeared(self, event):
                handler(event)
        return self.onAppearJ(target, AnonyObserver())

    def onVanish(self, target, handler = None):
        """Register `handler` to fire when `target` vanishes."""
        if not handler:
            return self.onVanishJ(target, None)
        class AnonyObserver(ObserverCallBack):
            def vanished(self, event):
                handler(event)
        return self.onVanishJ(target, AnonyObserver())

    def onChange(self, arg1=0, arg2=None):
        """Register a change observer.

        Accepts either (min_size, handler) or just (handler); passing a
        handler in both positions is rejected.
        """
        if isinstance(arg1, int):
            min_size = arg1
            handler = arg2
        else:
            if (arg2 != None):
                raise Exception("onChange: Invalid parameters set")
            min_size = 0
            handler = arg1
        if not handler:
            return self.onChangeJ(min_size, None)
        class AnonyObserver(ObserverCallBack):
            def changed(self, event):
                handler(event)
        return self.onChangeJ(min_size, AnonyObserver())

    def observe(self, time=FOREVER, background=False):
        # Delegate to the Java-side observer loop.
        return self.observeJ(time, background)
dendisuhubdy/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
  """Return the rows of `array` selected by `row_indices`, stacked into a
  new 2-D array (rows may repeat and appear in any order)."""
  return np.vstack([array[i] for i in row_indices])
class FeedingQueueRunnerTestCase(test.TestCase):
  """Tests for `FeedingQueueRunner`."""

  def testArrayFeeding(self):
    # Single-threaded feeding must replay array rows in order; the first
    # dequeue component carries the source row indices.
    with ops.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          # Indices wrap around the array (modulo its length).
          indices = [
              j % array.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testArrayFeedingMultiThread(self):
    # With shuffling + multiple threads the order is unspecified, so only
    # consistency between returned indices and rows is checked.
    with ops.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeeding(self):
    # Skipped silently when pandas is unavailable.
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array1.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          # First dequeue component is the DataFrame index, then one
          # component per column, in column order.
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeedingMultiThread(self):
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          # Shuffled order: check row content against the returned indices.
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
hynnet/openwrt-mt7620 | staging_dir/host/lib/python2.7/xml/sax/handler.py | 230 | 13921 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id$
"""
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # Default behaviour treats recoverable errors as fatal.
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # Python 2 print statement: warnings go to stdout by default.
        print exception
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Interface for receiving logical document content events.

    This is the main callback interface in SAX, and the one most
    important to applications. The order of events in this interface
    mirrors the order of the information in the document."""

    def __init__(self):
        # Set by setDocumentLocator(); None until the parser installs one.
        self._locator = None

    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.

        SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if it does so, it must supply
        the locator to the application by invoking this method before
        invoking any of the other methods in the DocumentHandler
        interface.

        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.

        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
        self._locator = locator

    def startDocument(self):
        """Receive notification of the beginning of a document.

        The SAX parser will invoke this method only once, before any
        other methods in this interface or in DTDHandler (except for
        setDocumentLocator)."""

    def endDocument(self):
        """Receive notification of the end of a document.

        The SAX parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""

    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.

        The information from this event is not necessary for normal
        Namespace processing: the SAX XML reader will automatically
        replace prefixes for element and attribute names when the
        http://xml.org/sax/features/namespaces feature is true (the
        default).

        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.

        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElement event, and all endPrefixMapping events will occur
        after the corresponding endElement event, but their order is
        not guaranteed."""

    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.

        See startPrefixMapping for details. This event will always
        occur after the corresponding endElement event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""

    def startElement(self, name, attrs):
        """Signals the start of an element in non-namespace mode.

        The name parameter contains the raw XML 1.0 name of the
        element type as a string and the attrs parameter holds an
        instance of the Attributes class containing the attributes of
        the element."""

    def endElement(self, name):
        """Signals the end of an element in non-namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElement event."""

    def startElementNS(self, name, qname, attrs):
        """Signals the start of an element in namespace mode.

        The name parameter contains the name of the element type as a
        (uri, localname) tuple, the qname parameter the raw XML 1.0
        name used in the source document, and the attrs parameter
        holds an instance of the Attributes class containing the
        attributes of the element.

        The uri part of the name tuple is None for elements which have
        no namespace."""

    def endElementNS(self, name, qname):
        """Signals the end of an element in namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElementNS event."""

    def characters(self, content):
        """Receive notification of character data.

        The Parser will call this method to report each chunk of
        character data. SAX parsers may return all contiguous
        character data in a single chunk, or they may split it into
        several chunks; however, all of the characters in any single
        event must come from the same external entity so that the
        Locator provides useful information."""

    def ignorableWhitespace(self, whitespace):
        """Receive notification of ignorable whitespace in element content.

        Validating Parsers must use this method to report each chunk
        of ignorable whitespace (see the W3C XML 1.0 recommendation,
        section 2.10): non-validating parsers may also use this method
        if they are capable of parsing and using content models.

        SAX parsers may return all contiguous whitespace in a single
        chunk, or they may split it into several chunks; however, all
        of the characters in any single event must come from the same
        external entity, so that the Locator provides useful
        information."""

    def processingInstruction(self, target, data):
        """Receive notification of a processing instruction.

        The Parser will invoke this method once for each processing
        instruction found: note that processing instructions may occur
        before or after the main document element.

        A SAX parser should never report an XML declaration (XML 1.0,
        section 2.8) or a text declaration (XML 1.0, section 4.3.1)
        using this method."""

    def skippedEntity(self, name):
        """Receive notification of a skipped entity.

        The Parser will invoke this method once for each entity
        skipped. Non-validating processors may skip entities if they
        have not seen the declarations (because, for example, the
        entity was declared in an external DTD subset). All processors
        may skip external entities, depending on the values of the
        http://xml.org/sax/features/external-general-entities and the
        http://xml.org/sax/features/external-parameter-entities
        properties."""
# ===== DTDHandler =====
class DTDHandler:
    """Handle DTD events.

    This interface specifies only those DTD events required for basic
    parsing (unparsed entities and attributes)."""

    def notationDecl(self, name, publicId, systemId):
        "Handle a notation declaration event."

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        "Handle an unparsed entity declaration event."
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving entities. If you create an object
    implementing this interface, then register the object with your
    Parser, the parser will call the method in your object to
    resolve all external entities. Note that DefaultHandler implements
    this interface with the default behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Resolve the system identifier of an entity and return either
        the system identifier to read from as a string, or an InputSource
        to read from."""
        # Default: resolve every entity to its declared system id.
        return systemId
#============================================================================
#
# CORE FEATURES
#
#============================================================================
feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
# (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write
feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
# declarations.
# false: Do not report attributes used for Namespace declarations, and
# optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write
feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
# local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write
feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
# external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write
feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write
feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
# DTD subset.
# false: Do not include any external parameter entities, even the external
# DTD subset.
# access: (parsing) read-only; (not parsing) read/write
all_features = [feature_namespaces,
feature_namespace_prefixes,
feature_string_interning,
feature_validation,
feature_external_ges,
feature_external_pes]
#============================================================================
#
# CORE PROPERTIES
#
#============================================================================
property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write
property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
# than notations and unparsed entities.
# access: read/write
property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
# a DOM iterator; when not parsing, the root DOM node for
# iteration.
# access: (parsing) read-only; (not parsing) read/write
property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
# the current event.
# access: read-only
property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
# protocol. May change during parsing (e.g. after
# processing a META tag)
# read: return the current encoding (possibly established through
# auto-detection.
# initial value: UTF-8
#
property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
# allow interning across different documents
# read: return the current interning dictionary, or None
#
all_properties = [property_lexical_handler,
property_dom_node,
property_declaration_handler,
property_xml_string,
property_encoding,
property_interning_dict]
| gpl-2.0 |
rjschwei/azure-sdk-for-python | azure-keyvault/azure/keyvault/generated/models/key_operation_result.py | 5 | 1090 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyOperationResult(Model):
    """The key operation result.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar kid: Key identifier
    :vartype kid: str
    :ivar result:
    :vartype result: bytes
    """

    # Both fields are server-populated (read-only); client-supplied values
    # are dropped during serialization.
    _validation = {
        'kid': {'readonly': True},
        'result': {'readonly': True},
    }

    # msrest wire mapping; note 'result' travels under the JSON key 'value'
    # and is base64-decoded into bytes.
    _attribute_map = {
        'kid': {'key': 'kid', 'type': 'str'},
        'result': {'key': 'value', 'type': 'base64'},
    }

    def __init__(self):
        self.kid = None
        self.result = None
| mit |
bev-a-tron/pledge_service | testlib/bs4/tests/test_lxml.py | 273 | 2965 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError, e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return LXMLTreeBuilder()

    def test_out_of_range_entity(self):
        # Character references beyond the Unicode range must be dropped,
        # not crash the parser.
        self.assertSoupEquals(
            "<p>foo&#10000000000000;bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo&#1000000000;bar</p>", "<p>foobar</p>")

    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.
    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        self.assertEqual("", doctype.strip())

    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        # Python 2 code: `unicode` builtin is intentional here.
        self.assertEqual(u"<b/>", unicode(soup.b))
        self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))

    def test_real_xhtml_document(self):
        """lxml strips the XML definition from an XHTML doc, which is fine."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        # Compare with newlines and the XML declaration stripped, since
        # lxml removes the declaration when building the tree.
        self.assertEqual(
            soup.encode("utf-8").replace(b"\n", b''),
            markup.replace(b'\n', b'').replace(
                b'<?xml version="1.0" encoding="utf-8"?>', b''))
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        # XML variant of the lxml builder; smoke tests come from the mixin.
        return LXMLTreeBuilderForXML()
| apache-2.0 |
ximenesuk/openmicroscopy | components/tools/OmeroFS/ez_setup.py | 15 | 10322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools to bootstrap when none (or an older one) is present.
DEFAULT_VERSION = "0.6c11"
# PyPI directory holding the setuptools eggs for the running Python version.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 checksums of every released setuptools egg, keyed by egg
# file name; _validate_md5() checks downloads against this table, and
# update_md5() rewrites it in place.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    # Verify a downloaded egg against the built-in checksum table and return
    # the data unchanged; egg names not in the table pass through unchecked.
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            # Python 2 print-to-stderr syntax; this bootstrap targets py2.
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Remember whether setuptools was already imported: upgrading it
    # in-process is unsafe in that case, so we can only abort with advice.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it on sys.path and tell setuptools to
        # self-install from it when setup() runs.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # Python 2 except syntax: an older setuptools is installed.
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Safe to drop the stale modules and bootstrap a fresh egg.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    # Python 2 only: urllib2 does not exist on Python 3.
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a chance to abort (or, on machines without
                # network access, to read the notice) before downloading.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: bootstrap from a freshly downloaded egg,
        # then remove the temporary egg file again.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Ancient placeholder release that cannot upgrade itself.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade it via easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            # Python 2 print statements.
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Recompute the checksum of each given egg file.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Re-render the md5_data dict literal, sorted for a stable diff.
    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    # Splice the regenerated table into this script's own source text.
    # NOTE(review): file is read as bytes but matched with a str pattern --
    # fine on Python 2 (str == bytes), would need adjusting for Python 3.
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update file1.egg ..." refreshes the checksum table in-place;
    # anything else is treated as an install/upgrade request.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| gpl-2.0 |
tjsavage/sfcsdatabase | django/conf/locale/sv/formats.py | 16 | 1087 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Swedish (sv) date/time formats for Django's localization machinery.
# Display formats below use Django's date-template syntax (not strftime).
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1  # 1 = Monday in Django's convention
# Input formats use strftime syntax and are tried in order when parsing.
# NOTE(review): the '%m/%d/%Y' family is US-style month-first -- unusual for
# a Swedish locale; confirm these are intentionally kept.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y', # '10/25/06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3  # digits per thousands group
| bsd-3-clause |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/extra_modules/pyxmpp2/simple.py | 46 | 5271 | #
# (C) Copyright 2005-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Simple API for simple things like sendig messages.
The simplest way to send a message:
>>> from pyxmpp2.simple import send_message
>>> send_message("bob@example.org", "bob's password", "alice@example.org",
... "Hello Alice")
Please note, though:
- this is inefficient for anything more than sending a single message,
as a new connection is established and closed for each `send_message`
call.
- the default TLS settings are insecure (not peer certificate validation)
"""
__docformat__ = "restructuredtext en"
import sys
from .interfaces import EventHandler, event_handler, QUIT
from .client import Client
from .jid import JID
from .streamevents import AuthorizedEvent, DisconnectedEvent
from .message import Message
from .settings import XMPPSettings
class FireAndForget(EventHandler):
    """A minimal XMPP client that just connects to a server
    and runs single function.
    :Ivariables:
        - `action`: the function to run after the stream is authorized
        - `client`: a `Client` instance to do the rest of the job
    :Types:
        - `action`: a callable accepting a single 'client' argument
        - `client`: `pyxmpp2.client.Client`
    """
    def __init__(self, local_jid, action, settings):
        # Register self as the event handler so the @event_handler-decorated
        # methods below receive this client's stream events.
        self.action = action
        self.client = Client(local_jid, [self], settings)
    def run(self):
        """Request client connection and start the main loop."""
        self.client.connect()
        self.client.run()
    def disconnect(self):
        """Request disconnection and let the main loop run for a 2 more
        seconds for graceful disconnection."""
        self.client.disconnect()
        self.client.run(timeout = 2)
    @event_handler(AuthorizedEvent)
    def handle_authorized(self, event):
        """Run the deferred `action` once the stream is authorized, then
        start disconnecting."""
        # pylint: disable=W0613
        # NOTE(review): the original docstring said "Send the initial
        # presence", but no presence is sent here -- only `action` runs.
        self.action(self.client)
        self.client.disconnect()
    @event_handler(DisconnectedEvent)
    def handle_disconnected(self, event):
        """Quit the main loop upon disconnection."""
        # pylint: disable=W0613,R0201
        return QUIT
def send_message(source_jid, password, target_jid, body, subject = None,
                 message_type = "chat", message_thread = None, settings = None):
    """Start an XMPP session and send a message, then exit.
    :Parameters:
        - `source_jid`: sender JID
        - `password`: sender password
        - `target_jid`: recipient JID
        - `body`: message body
        - `subject`: message subject
        - `message_type`: message type
        - `message_thread`: message thread id
        - `settings`: other settings
    :Types:
        - `source_jid`: `pyxmpp2.jid.JID` or `basestring`
        - `password`: `basestring`
        - `target_jid`: `pyxmpp2.jid.JID` or `basestring`
        - `body`: `basestring`
        - `subject`: `basestring`
        - `message_type`: `basestring`
        - `message_thread`: `basestring`
        - `settings`: `pyxmpp2.settings.XMPPSettings`
    """
    # pylint: disable=R0913,R0912
    if sys.version_info.major < 3:
        # Python 2 only: decode byte-string arguments to unicode using the
        # locale's preferred encoding.
        # pylint: disable-msg=W0404
        from locale import getpreferredencoding
        encoding = getpreferredencoding()
        if isinstance(source_jid, str):
            source_jid = source_jid.decode(encoding)
        if isinstance(password, str):
            password = password.decode(encoding)
        if isinstance(target_jid, str):
            target_jid = target_jid.decode(encoding)
        if isinstance(body, str):
            body = body.decode(encoding)
        if isinstance(message_type, str):
            message_type = message_type.decode(encoding)
        if isinstance(message_thread, str):
            message_thread = message_thread.decode(encoding)
    if not isinstance(source_jid, JID):
        source_jid = JID(source_jid)
    if not isinstance(target_jid, JID):
        target_jid = JID(target_jid)
    # NOTE(review): `message_thread` is decoded above but never attached to
    # the Message -- looks like an omission; confirm against Message().
    msg = Message(to_jid = target_jid, body = body, subject = subject,
                  stanza_type = message_type)
    def action(client):
        """Send a message `msg` via a client."""
        client.stream.send(msg)
    if settings is None:
        # Insecure default: TLS is enabled but the peer certificate is NOT
        # verified (see the module docstring).
        settings = XMPPSettings({"starttls": True, "tls_verify_peer": False})
    if password is not None:
        settings["password"] = password
    handler = FireAndForget(source_jid, action, settings)
    try:
        handler.run()
    except KeyboardInterrupt:
        # Close the stream gracefully before propagating Ctrl-C.
        handler.disconnect()
        raise
# vi: sts=4 et sw=4
| apache-2.0 |
areitz/pants | src/python/pants/backend/core/targets/resources.py | 7 | 1552 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.target import Target
class Resources(Target):
    """A set of files accessible as resources from the JVM classpath.
    Looking for loose files in your application bundle? Those are
    `bundle <#bundle>`_\s.
    Resources are Java-style resources accessible via the ``Class.getResource``
    and friends API. In the ``jar`` goal, the resource files are placed in the resulting `.jar`.
    """
    def __init__(self, address=None, payload=None, sources=None, **kwargs):
        """
        :param sources: Files to "include". Paths are relative to the
          BUILD file's directory.
        :type sources: ``Fileset`` or list of strings
        """
        payload = payload or Payload()
        # Source globs are resolved relative to the declaring BUILD file's
        # directory (address.spec_path).
        payload.add_fields({
            'sources': self.create_sources_field(sources,
                sources_rel_path=address.spec_path, key_arg='sources'),
        })
        super(Resources, self).__init__(address=address, payload=payload, **kwargs)
    def has_sources(self, extension=None):
        """``Resources`` never own sources of any particular native type, like for example
        ``JavaLibrary``.
        """
        # TODO(John Sirois): track down the reason for this hack and kill or explain better.
        # Claims to have sources only when no specific extension is asked for.
        return extension is None
| apache-2.0 |
rcook/DesignLab | app/src/processing/app/i18n/python/requests/packages/urllib3/response.py | 227 | 7410 | # urllib3/response.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import gzip
import logging
import zlib
from io import BytesIO
from .exceptions import DecodeError
from .packages.six import string_types as basestring
log = logging.getLogger(__name__)
def decode_gzip(data):
    """Return the given gzip-compressed byte string, decompressed."""
    with gzip.GzipFile(fileobj=BytesIO(data)) as unzipper:
        return unzipper.read()
def decode_deflate(data):
    """Decompress *data*, accepting both zlib-wrapped (RFC 1950) and raw
    (RFC 1951) deflate streams."""
    try:
        # Most servers send a zlib-wrapped stream; try that first.
        return zlib.decompress(data, zlib.MAX_WBITS)
    except zlib.error:
        # Some servers send a bare deflate stream; negative wbits tells
        # zlib not to expect a header/checksum.
        return zlib.decompress(data, -zlib.MAX_WBITS)
class HTTPResponse(object):
    """
    HTTP Response container.
    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed.
    Extra parameters for behaviour not present in httplib.HTTPResponse:
    :param preload_content:
        If True, the response's body will be preloaded during construction.
    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.
    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """
    # Maps lower-cased Content-Encoding header values to decoder callables.
    CONTENT_DECODERS = {
        'gzip': decode_gzip,
        'deflate': decode_deflate,
    }
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        self.headers = headers or {}
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self._decode_content = decode_content
        # A (non-empty) string body is cached directly; a file-like body is
        # stored in self._fp below and read lazily.
        self._body = body if body and isinstance(body, basestring) else None
        self._fp = None
        self._original_response = original_response
        # Pool/connection are kept so the connection can be released back to
        # its pool once the response is fully consumed (see release_conn).
        self._pool = pool
        self._connection = connection
        if hasattr(body, 'read'):
            self._fp = body
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
    def get_redirect_location(self):
        """
        Should we redirect and where to?
        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in [301, 302, 303, 307]:
            return self.headers.get('location')
        return False
    def release_conn(self):
        # Hand the underlying connection back to its pool, once only.
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None
    @property
    def data(self):
        # For backwords-compat with earlier urllib3 0.4 and earlier.
        # NOTE(review): the truthiness test means an empty cached body (b'')
        # falls through to a redundant read(); `is not None` would be more
        # precise -- confirm before changing.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.
        :param amt:
            How much of the content to read. If specified, decoding and caching
            is skipped because we can't decode partial content nor does it make
            sense to cache partial content as the full response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header. (Overridden if ``amt`` is set.)
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 2616
        # Section 3.5
        content_encoding = self.headers.get('content-encoding', '').lower()
        decoder = self.CONTENT_DECODERS.get(content_encoding)
        if decode_content is None:
            # Fall back to the policy chosen at construction time.
            decode_content = self._decode_content
        if self._fp is None:
            return
        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
            else:
                # Partial read: decoding and caching are skipped (see above).
                data = self._fp.read(amt)
                if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
                    # properly close the connection in all cases. There is no harm
                    # in redundantly calling close.
                    self._fp.close()
                return data
            try:
                if decode_content and decoder:
                    data = decoder(data)
            except (IOError, zlib.error):
                raise DecodeError("Received response with content-encoding: %s, but "
                                  "failed to decode it." % content_encoding)
            if cache_content:
                self._body = data
            return data
        finally:
            # Once the original response reports itself closed, the pooled
            # connection can be reused.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.
        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        # Normalize headers between different versions of Python
        headers = {}
        for k, v in r.getheaders():
            # Python 3: Header keys are returned capitalised
            k = k.lower()
            has_value = headers.get(k)
            if has_value: # Python 3: Repeating header keys are unmerged.
                v = ', '.join([has_value, v])
            headers[k] = v
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        return ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers
    def getheader(self, name, default=None):
        return self.headers.get(name, default)
| lgpl-2.1 |
dentaku65/plugin.video.pelisalacarta.it | channels/peliculasonlineflv.py | 12 | 9231 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasonlineflv
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
# Channel metadata -- presumably read by the pelisalacarta core loader
# (matches the header fields used by sibling channel modules).
__channel__ = "peliculasonlineflv"
__category__ = "F,D"
__type__ = "generic"
__title__ = "Peliculas Online FLV"
__language__ = "ES"
# Global debug flag taken from the add-on settings.
DEBUG = config.get_setting("debug")
def isGeneric():
    # Flag queried by the channel loader; True appears to mark this module
    # as a "generic"-type channel (see __type__ above) -- confirm in core.
    return True
def mainlist(item):
    """Build the channel's root menu: latest, A-Z index, genres, search."""
    logger.info("[peliculasonlineflv.py] mainlist")
    base_url = "http://www.peliculasonlineflv.net"
    return [
        Item(channel=__channel__, title="Novedades", action="peliculas", url=base_url),
        Item(channel=__channel__, title="Por orden alfabético", action="letras", url=base_url),
        Item(channel=__channel__, title="Por géneros", action="generos", url=base_url),
        Item(channel=__channel__, title="Buscar...", action="search"),
    ]
def search(item,texto):
    """Run a site search for *texto* and scrape the result listing."""
    logger.info("[peliculasonlineflv.py] search")
    if item.url == "":
        item.url = "http://www.peliculasonlineflv.net/buscar/?s="
    # The site expects '+' as the space separator in the query string.
    item.url = item.url + texto.replace(" ", "+")
    try:
        return peliculas(item)
    except:
        # Swallow everything so a broken channel does not take down the
        # global search; log the exception details instead.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def letras(item):
    """Scrape the alphabetical index and return one entry per letter."""
    logger.info("[peliculasonlineflv.py] letras")
    itemlist=[]
    # Download the page
    data = scrapertools.cachePage(item.url)
    # Pattern for the entries
    patron = '<li><a href="(/letra[^"]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    # Add the entries found
    for scrapedurl,scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedplot = ""
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        # Each letter drills down into a movie listing page.
        itemlist.append( Item(channel=__channel__, action="peliculas", title=title , url=url , thumbnail=thumbnail , plot=plot , folder=True) )
    return itemlist
def generos(item):
    """Scrape the genre index and return one entry per genre."""
    logger.info("[peliculasonlineflv.py] generos")
    itemlist=[]
    # Download the page
    data = scrapertools.cachePage(item.url)
    # Pattern for the entries
    patron = '<li><a href="(/genero[^"]+)" title="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    # Add the entries found
    for scrapedurl,scrapedtitle in matches:
        # Python 2: page text is latin-1; re-encode titles as UTF-8.
        title = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedplot = ""
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="peliculas", title=title , url=url , thumbnail=thumbnail , plot=plot , folder=True) )
    return itemlist
def peliculas(item):
    """Scrape a listing page: one Item per movie, plus a next-page entry."""
    logger.info("[peliculasonlineflv.py] peliculas")
    itemlist=[]
    # Download the page
    data = scrapertools.cachePage(item.url)
    '''
    <div class="pelis">
    <a href="http://www.peliculasonlineflv.net/pelicula/io-e-te-tu-y-yo-2012-subtitulada/" title="Io e te (Tú y yo) (2012) - Subtitulada"><img class="port" src="http://www.peliculasonlineflv.net/img/1495.jpg" alt="Io e te (Tú y yo) (2012) - Subtitulada"></a>
    <div class="pelis-desc">
    <h3>Io e te (Tú y yo) (2012) - Subtitulada</h3>
    <p class="desc-mid">
    La pelicula trata sobre la historia de un adolescente de catorce años que engaña a sus padres con una coartada de una esquiada entre amigos para, en realidad, pasar esos días en un sótano con la intención de ayudar a su hermanastra mayor a superar su adicción a la heroína.
    </p>
    <p class="desc-low">
    <span class="desc-item"><span class="bold">Reparto: </span> Tea Falco, Jacopo Olmo Antinori, Sonia Bergamasco, Veronica Lazar</span>
    <span class="desc-item"><span class="bold">Director: </span> Bernardo Bertolucci</span>
    <span class="desc-item"><span class="bold">Género: </span> Drama, Family</span>
    </p>
    </div>
    '''
    # Pattern for the entries (sample markup above): captures URL, title,
    # thumbnail and the plot paragraph.
    patron = '<div class="pelis"[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"><img class="port" src="([^"]+)"[^<]+</a[^<]+'
    patron += '<div class="pelis-desc"[^<]+'
    patron += '<h3[^<]+</h3[^<]+'
    patron += '<p class="desc-mid">([^<]+)</p>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    # Add the entries found
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedplot in matches:
        # Python 2: page text is latin-1; re-encode titles as UTF-8.
        title = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
        url = scrapedurl
        plot = scrapedplot
        thumbnail = scrapedthumbnail
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="findvideos", title=title , url=url , thumbnail=thumbnail , plot=plot , folder=True) )
    # "Next page" link: the anchor right after the current-page marker.
    patron = '<span class="actual">[^<]+</span[^<]+<a href="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        siguiente_url = urlparse.urljoin(item.url,"/"+matches[0])
        itemlist.append( Item(channel=__channel__, action="peliculas", title=">> Página siguiente" , url=siguiente_url , folder=True) )
    return itemlist
def findvideos(item):
    """Scrape the movie page and build the list of playable mirrors.

    The generic server detector runs over the page first; then the site's
    obfuscated manual links (goo.gl-redirected VK ids and Putlocker embed
    codes) are resolved.  The six copy-pasted try/except blocks of the
    original were consolidated into one data-driven loop; titles, server
    names, ordering and log messages are unchanged.
    """
    logger.info("[peliculasonlineflv.py] findvideos")
    itemlist = []
    # Download the page
    data = scrapertools.cachePage(item.url)
    from servers import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.title = "Ver en "+videoitem.server
        videoitem.fulltitle = item.fulltitle
    # Site-specific manual patterns: (server, regex for the code, title).
    # Order matters: it fixes the order of the resulting entries.
    manual_sources = [
        ("vk", r"vklat\=([a-zA-Z0-9]+)", "Ver en VK (Latino)"),
        ("putlocker", r"plat\=([A-Z0-9]+)", "Ver en Putlocker (Latino)"),
        ("vk", r"vksub\=([a-zA-Z0-9]+)", "Ver en VK (Subtitulado)"),
        ("putlocker", r"plsub\=([A-Z0-9]+)", "Ver en Putlocker (Subtitulado)"),
        ("vk", r"vk\=([a-zA-Z0-9]+)", "Ver en VK"),
        ("putlocker", r"put\=([A-Z0-9]+)", "Ver en Putlocker"),
    ]
    for server, pattern, title in manual_sources:
        try:
            code = scrapertools.get_match(data, pattern)
            if server == "vk":
                # VK links hide behind a goo.gl short URL; the real URL is
                # in the redirect's Location header.
                url = scrapertools.get_header_from_response(
                    "http://goo.gl/"+code, header_to_get="location")
            else:
                url = "http://www.putlocker.com/embed/"+code
            itemlist.append(Item(channel=__channel__, action="play",
                                 title=title, server=server, url=url,
                                 folder=False))
        except:
            # get_match raises when the pattern is absent -- that mirror is
            # simply not offered for this movie (same log text as before).
            logger.info("No encontrado enlace "+server.upper())
    return itemlist
# Automatic channel check: must return True if the channel is working.
def test():
    """Channel self-check: True as soon as one movie yields playable mirrors."""
    first_menu_entry = mainlist(Item())[0]
    for movie in peliculas(first_menu_entry):
        if findvideos(item=movie):
            return True
    return False
xen0l/ansible | lib/ansible/modules/system/runit.py | 64 | 8646 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible tooling (ansible-doc / CI), not users.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = '''
---
module: runit
author:
- James Sumners (@jsumners)
version_added: "2.3"
short_description: Manage runit services
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
description:
- Name of the service to manage.
required: yes
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always bounce the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
choices: [ killed, once, reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
type: bool
service_dir:
description:
- directory runsv watches for services
default: /var/service
service_src:
description:
- directory where services are defined, the source of symlinks to service_dir.
default: /etc/sv
'''
EXAMPLES = '''
- name: Start sv dnscache, if not running
runit:
name: dnscache
state: started
- name: Stop sv dnscache, if running
runit:
name: dnscache
state: stopped
- name: Kill sv dnscache, in all cases
runit:
name: dnscache
state: killed
- name: Restart sv dnscache, in all cases
runit:
name: dnscache
state: restarted
- name: Reload sv dnscache, in all cases
runit:
name: dnscache
state: reloaded
- name: Use alternative sv directory location
runit:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc

    Service state (enabled, state, pid, duration, full_state) is parsed
    from ``sv status`` output and exposed to the caller via report().
    """

    # def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # module: the AnsibleModule driving this run; used for params,
        # binary lookup, run_command and fail_json.
        self.extra_paths = []
        # Attribute names surfaced to Ansible by report().
        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
        self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # A service counts as enabled when its symlink exists in
        # service_dir; lexists so a dangling symlink still counts.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.get_status()
        else:
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking its source into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Force-stop the service and remove its symlink from service_dir."""
        self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))

    def get_status(self):
        """Refresh state, pid and duration from ``sv status`` output."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])

        if err is not None and err:
            self.full_state = self.state = err
        else:
            self.full_state = out
            # full_state *may* contain information about the logger:
            # "down: /etc/service/service-without-logger: 1s, normally up\n"
            # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
            # Drop the "; run: log: ..." tail so we only parse the service itself.
            full_state_no_logger = self.full_state.split("; ")[0]

            m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
            if m:
                self.pid = m.group(1)

            # First " <digits>s" after the pid parens is the uptime/downtime.
            m = re.search(r' (\d+)s', full_state_no_logger)
            if m:
                self.duration = m.group(1)

            if re.search(r'^run:', full_state_no_logger):
                self.state = 'started'
            elif re.search(r'^down:', full_state_no_logger):
                self.state = 'stopped'
            else:
                self.state = 'unknown'
        return

    def started(self):
        return self.start()

    def start(self):
        return self.execute_command([self.svc_cmd, 'start', self.svc_full])

    def stopped(self):
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, 'stop', self.svc_full])

    def once(self):
        # Run a normally-down service exactly once (not idempotent).
        return self.execute_command([self.svc_cmd, 'once', self.svc_full])

    def reloaded(self):
        return self.reload()

    def reload(self):
        # Sends HUP to the service.
        return self.execute_command([self.svc_cmd, 'reload', self.svc_full])

    def restarted(self):
        return self.restart()

    def restart(self):
        return self.execute_command([self.svc_cmd, 'restart', self.svc_full])

    def killed(self):
        return self.kill()

    def kill(self):
        return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])

    def execute_command(self, cmd):
        """Run `cmd` (an argv list); fail the module on execution errors."""
        try:
            # Fix: pass the argv list directly instead of ' '.join(cmd).
            # Joining broke service/binary paths containing spaces and
            # re-tokenised the command line.
            (rc, out, err) = self.module.run_command(cmd)
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the report_vars attributes."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
def main():
    """Entry point: parse module parameters, apply enabled/state changes
    through sv, and exit with the resulting service report."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            dist=dict(type='str', default='runit'),
            service_dir=dict(type='str', default='/var/service'),
            service_src=dict(type='str', default='/etc/sv'),
        ),
        supports_check_mode=True,
    )

    # Force a C locale so sv's status output is parseable on any host.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']

    sv = Sv(module)
    changed = False
    # NOTE(review): orig_state is computed but never used afterwards —
    # candidate for removal.
    orig_state = sv.report()

    if enabled is not None and enabled != sv.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    sv.enable()
                else:
                    sv.disable()
            except (OSError, IOError) as e:
                module.fail_json(msg="Could not change service link: %s" % to_native(e), exception=traceback.format_exc())

    if state is not None and state != sv.state:
        changed = True
        if not module.check_mode:
            # Dispatch to the Sv method named after the requested state
            # (started/stopped/restarted/killed/once/reloaded).
            getattr(sv, state)()

    module.exit_json(changed=changed, sv=sv.report())


if __name__ == '__main__':
    main()
| gpl-3.0 |
Kazade/NeHe-Website | django/conf/locale/sr/formats.py | 655 | 1980 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
'%Y-%m-%d', # '2006-10-25'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
NeCTAR-RC/neutron | neutron/tests/unit/agent/l3/test_ha_router.py | 17 | 1879 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.l3 import ha_router
from neutron.openstack.common import uuidutils
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class TestBasicRouterOperations(base.BaseTestCase):
    """Unit tests for ha_router.HaRouter construction and CIDR reporting."""

    def setUp(self):
        # No extra fixtures beyond the base test case.
        super(TestBasicRouterOperations, self).setUp()

    def _create_router(self, router=None, **kwargs):
        # Build an HaRouter wired up with mocks; `router` defaults to a
        # MagicMock so arbitrary attribute access never fails.
        if not router:
            router = mock.MagicMock()
        self.agent_conf = mock.Mock()
        # NOTE The use_namespaces config will soon be deprecated
        self.agent_conf.use_namespaces = True
        self.router_id = _uuid()
        return ha_router.HaRouter(mock.sentinel.enqueue_state,
                                  self.router_id,
                                  router,
                                  self.agent_conf,
                                  mock.sentinel.driver,
                                  **kwargs)

    def test_get_router_cidrs_returns_ha_cidrs(self):
        # get_router_cidrs must surface exactly the addresses that
        # keepalived reports for the given device.
        ri = self._create_router()
        device = mock.MagicMock()
        device.name.return_value = 'eth2'
        addresses = ['15.1.2.2/24', '15.1.2.3/32']
        ri._get_cidrs_from_keepalived = mock.MagicMock(return_value=addresses)
        self.assertEqual(set(addresses), ri.get_router_cidrs(device))
| apache-2.0 |
predikto/python-sdk | predikto/errors.py | 1 | 1828 | # Copyright 2014-2015 Predikto, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class RequestError(Exception):
    """Base error raised for failed HTTP requests.

    Wraps the offending ``response`` together with the optional decoded
    ``content`` and a free-form ``message``.  The string form is built
    from whatever pieces of information the response object carries.
    """

    def __init__(self, response, content=None, message=None):
        self.response = response
        self.content = content
        self.message = message

    def __str__(self):
        parts = ["ERROR!"]
        response = self.response
        if hasattr(response, 'status_code'):
            parts.append(" HTTP Status: %s." % (response.status_code))
        if hasattr(response, 'message'):
            parts.append(" Message: %s." % (response.message))
        if self.content is not None:
            parts.append(" Error: " + str(self.content))
        return ''.join(parts)
class MissingConfig(Exception):
    # Raised when required SDK configuration is absent.
    pass
class ClientError(RequestError):
    """
    Base class for the 4xx-range client errors below.
    """
    pass
class InvalidResource(ClientError):
    """
    HTTP 400 Bad Request.
    """
    pass
class Unauthorized(ClientError):
    """
    HTTP 401 Unauthorized.
    """
    pass
class Forbidden(ClientError):
    """
    HTTP 403 Forbidden.
    """
    pass
class ResourceNotFound(ClientError):
    """
    HTTP 404 Not Found.
    """
    pass
class EntityTooLarge(ClientError):
    """
    HTTP 413 Request Entity Too Large.
    """
    pass
class ServerError(RequestError):
    """
    HTTP 500-range server error.
    """
    pass
class MethodNotAllowed(ClientError):
    """
    HTTP 405 Method Not Allowed.
    """
    def allowed_methods(self):
        # The Allow header names the verbs the endpoint accepts.
        # NOTE(review): assumes self.response supports item access
        # (e.g. a headers mapping) — confirm against callers.
        return self.response['Allow']
| apache-2.0 |
ChenglongChen/typhoon-blade | src/test/query_target_test.py | 3 | 1590 | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the test module to test query function of blade.
"""
import blade_test
class TestQuery(blade_test.TargetTest):
    """Test cc_library """
    def setUp(self):
        """setup method. """
        # Load the test_query workspace and run blade's 'query' command
        # over every target in it.
        self.doSetUp('test_query', full_targets=['...'], command='query')
        self.query_targets = ['test_query:poppy']
        self.all_targets = self.blade.get_build_targets()
    def testQueryCorrectly(self):
        """Test query targets dependency relationship correctly. """
        self.assertTrue(self.all_targets)
        result_map = {}
        # query_helper maps target key -> (dependencies, reverse dependencies).
        result_map = self.blade.query_helper(self.query_targets)
        all_targets = self.blade.get_build_targets()
        query_key = ('test_query', 'poppy')
        self.assertTrue(query_key in result_map.keys())
        deps = result_map.get(query_key, [])[0]
        depended_by = result_map.get(query_key, [])[1]
        self.assertTrue(deps)
        self.assertTrue(depended_by)
        # poppy must depend on its proto and static-resource libraries...
        dep_one_key = ('test_query', 'rpc_meta_info_proto')
        dep_second_key = ('test_query', 'static_resource')
        self.assertTrue(dep_one_key in deps)
        self.assertTrue(dep_second_key in deps)
        # ...and be depended upon by the client and mock targets.
        depended_one_key = ('test_query', 'poppy_client')
        depended_second_key = ('test_query', 'poppy_mock')
        self.assertTrue(depended_one_key in depended_by)
        self.assertTrue(depended_second_key in depended_by)
if __name__ == '__main__':
blade_test.run(TestQuery)
| bsd-3-clause |
molotof/infernal-twin | build/reportlab/src/reportlab/graphics/samples/clustered_bar.py | 42 | 4249 | #Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.samples.excelcolors import *
from reportlab.graphics.charts.barcharts import HorizontalBarChart
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin, String
from reportlab.graphics.charts.textlabels import Label
class ClusteredBar(_DrawingEditorMixin,Drawing):
    """A clustered horizontal bar chart drawing with title, legend and
    axis labels, coloured with the excelcolors palette.

    All geometry/figures below are editable via the ReportLab guiedit
    machinery; this initialiser only sets attribute defaults.
    """
    def __init__(self,width=200,height=150,*args,**kw):
        Drawing.__init__(self,width,height,*args,**kw)
        # The main chart area.
        self._add(self,HorizontalBarChart(),name='chart',validate=None,desc="The main chart")
        self.chart.width = 115
        self.chart.height = 80
        self.chart.x = 30
        self.chart.y = 40
        # One fill colour per data series, following the Excel palette.
        self.chart.bars[0].fillColor = color01
        self.chart.bars[1].fillColor = color02
        self.chart.bars[2].fillColor = color03
        self.chart.bars[3].fillColor = color04
        self.chart.bars[4].fillColor = color05
        self.chart.bars[5].fillColor = color06
        self.chart.bars[6].fillColor = color07
        self.chart.bars[7].fillColor = color08
        self.chart.bars[8].fillColor = color09
        self.chart.bars[9].fillColor = color10
        self.chart.fillColor = backgroundGrey
        self.chart.barLabels.fontName = 'Helvetica'
        # Value (numeric) axis configuration.
        self.chart.valueAxis.labels.fontName = 'Helvetica'
        self.chart.valueAxis.labels.fontSize = 6
        self.chart.valueAxis.forceZero = 1
        self.chart.data = [(100, 150, 180), (125, 180, 200)]
        self.chart.groupSpacing = 15
        self.chart.valueAxis.avoidBoundFrac = 1
        self.chart.valueAxis.gridEnd = 80
        self.chart.valueAxis.tickDown = 3
        self.chart.valueAxis.visibleGrid = 1
        # Category axis: one group per region.
        self.chart.categoryAxis.categoryNames = ['North', 'South', 'Central']
        self.chart.categoryAxis.tickLeft = 3
        self.chart.categoryAxis.labels.fontName = 'Helvetica'
        self.chart.categoryAxis.labels.fontSize = 6
        self.chart.categoryAxis.labels.dx = -3
        # Chart title, centred above the plot.
        self._add(self,Label(),name='Title',validate=None,desc="The title at the top of the chart")
        self.Title.fontName = 'Helvetica-Bold'
        self.Title.fontSize = 7
        self.Title.x = 100
        self.Title.y = 135
        self.Title._text = 'Chart Title'
        self.Title.maxWidth = 180
        self.Title.height = 20
        self.Title.textAnchor ='middle'
        # Legend mapping series colours to series names.
        self._add(self,Legend(),name='Legend',validate=None,desc="The legend or key for the chart")
        self.Legend.colorNamePairs = [(color01, 'Widgets'), (color02, 'Sprockets')]
        self.Legend.fontName = 'Helvetica'
        self.Legend.fontSize = 7
        self.Legend.x = 153
        self.Legend.y = 85
        self.Legend.dxTextSpace = 5
        self.Legend.dy = 5
        self.Legend.dx = 5
        self.Legend.deltay = 5
        self.Legend.alignment ='right'
        # Horizontal (X) axis caption.
        self._add(self,Label(),name='XLabel',validate=None,desc="The label on the horizontal axis")
        self.XLabel.fontName = 'Helvetica'
        self.XLabel.fontSize = 7
        self.XLabel.x = 85
        self.XLabel.y = 10
        self.XLabel.textAnchor ='middle'
        self.XLabel.maxWidth = 100
        self.XLabel.height = 20
        self.XLabel._text = "X Axis"
        # Vertical (Y) axis caption, rotated 90 degrees.
        self._add(self,Label(),name='YLabel',validate=None,desc="The label on the vertical axis")
        self.YLabel.fontName = 'Helvetica'
        self.YLabel.fontSize = 7
        self.YLabel.x = 12
        self.YLabel.y = 80
        self.YLabel.angle = 90
        self.YLabel.textAnchor ='middle'
        self.YLabel.maxWidth = 100
        self.YLabel.height = 20
        self.YLabel._text = "Y Axis"
        self._add(self,0,name='preview',validate=None,desc=None)
if __name__=="__main__": #NORUNTESTS
ClusteredBar().save(formats=['pdf'],outDir=None,fnRoot='clustered_bar')
| gpl-3.0 |
Lilykos/invenio | invenio/legacy/elmsubmit/misc.py | 13 | 18787 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
Miscellaneous utlity functions that have the potential for re-use.
"""
__revision__ = "$Id$"
import tempfile
import os
import random
import stat
import textwrap
import re
def concat(list_of_lists):
    """Flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3]."""
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def cleave_pair(list):
    # Should really generalize this to the nth case; but I only need
    # pairs right now!
    """
    Split `list` into its even-index and odd-index members:

    [1,2,3,4,5,6,7]

    becomes

    ([1,3,5,7], [2,4,6])
    """
    lefts = []
    rights = []
    buckets = (lefts, rights)
    for position, item in enumerate(list):
        # Even positions go left, odd positions go right.
        buckets[position % 2].append(item)
    return (lefts, rights)
def merge_pair(lefts, rights):
    """
    Interleave the two halves produced by cleave_pair:

    [1,3,5,7], [2,4,6]

    becomes

    [1,2,3,4,5,6,7]
    """
    halves = (lefts, rights)
    merged = []
    for position in range(len(lefts) + len(rights)):
        # Even output positions come from lefts, odd from rights.
        index, parity = divmod(position, 2)
        merged.append(halves[parity][index])
    return merged
def cr2lf(file):
    """
    Replace CRLF with LF. ie. Convert text file from DOS to Unix end
    of line conventions.
    """
    # Splitting on CRLF and rejoining with LF is equivalent to
    # str.replace but makes the line-ending translation explicit.
    return "\n".join(file.split("\r\n"))
# Directory backup using mirrordir:
def backup_directory(original_directory, backup_directory):
    """Mirror original_directory into a freshly created backup_directory.

    Returns None on success, or the non-None close() indicator from
    mirrordir's stdout pipe on failure.
    """
    # Backing up the directory requires GNU mirrordir to be installed;
    # shutil.copytree won't do the job if there are pipes or fifos
    # etc. in my_directory.
    # Implementing mirrordir directly in python would be a
    # good project!
    # mkdir will throw the correct errors for us:
    os.mkdir(backup_directory)
    # NOTE(review): directory names are interpolated into a shell command
    # unquoted — spaces or shell metacharacters in the paths will break
    # or be interpreted by the shell.
    commandline = 'mirrordir ' + original_directory + ' ' + backup_directory
    # Run the process using popen3; possibly dodgy on Windows!
    # Need popen3 rather other popen function because we want to
    # grab stderr and hide it from the clients console.
    # NOTE(review): os.popen3 was removed in Python 3; subprocess would
    # be needed for a port.
    (stdin, stdout, stderr) = os.popen3(commandline, 'r')
    # Close straight away; mirrordir expects no input.
    # return the exist status:
    return stdout.close()
# Tempfile stuff:
def open_tempfile(mode='wb'):
    """Create a mkstemp tempfile and return (open file object, filename).

    The caller is responsible for closing the file and unlinking it
    (see remove_tempfile).
    """
    # We open in binary mode and write a non-unicode string and so
    # can be sure that python will write the data verbatim,
    # without fiddling with CRLFs etc.
    (tf_file_descriptor, tf_name) = tempfile.mkstemp()
    tf = os.fdopen(tf_file_descriptor, mode)
    return (tf, tf_name)
def write_to_and_return_tempfile_name(data):
    """Write `data` to a fresh tempfile and return that tempfile's name."""
    (tf, tf_name) = open_tempfile()
    tf.write(data)
    tf.close()
    return tf_name
def remove_tempfile(filename):
    """
    Tries to unlink the named tempfile. Catches the OSError if
    unlinking fails.
    """
    try:
        os.unlink(filename)
    except OSError:
        # Couldn't delete temp file; no big problem.
        pass
# Random string stuff:
def random_alphanum_string(length, chars='abcdefghijklmnopqrstuvwxyz'):
    """
    Create a random string of given length, choosing each character
    with equal probability from the list given in string chars. For
    example: chars='aab' would cause each character to be 'a' with 2/3
    probability and 'b' with 1/3 probability (pseudorandomly
    speaking).
    """
    pool = list(chars)
    # Draw each position independently, then glue the picks together.
    picks = [random.choice(pool) for _ in range(length)]
    return ''.join(picks)
def mapmany(functions, in_list):
    """Map the composition of `functions` over `in_list`.

    If functions equals [phi, ... , alpha, beta, gamma] this returns
    map(phi, ... map(alpha, map(beta, map(gamma, in_list))) ... ).

    Fixed: uses functools.reduce (the bare builtin was removed in
    Python 3) and no longer reverses the caller's list in place.
    """
    from functools import reduce  # builtin reduce is Python 2 only
    pipeline = list(functions)    # copy so the caller's list is untouched
    pipeline.reverse()
    apply_map = lambda seq, func: map(func, seq)
    return reduce(apply_map, pipeline, in_list)
def dict2file(dictionary, directory):
    """
    Take any dictionary, eg.:

    { 'title' : 'The loveliest title.',
      'name' : 'Pete the dog.',
      'info' : { 'age' : '21', 'evil' : 'yes' }
    }

    and create a set of files in the given directory:

    directory/title
    directory/name
    directory/info/age
    directory/info/evil

    so that each filename is a dictionary key, and the contents of
    each file is the value that the key pointed to.

    Fixed: mapping detection used the Python-2-only ``has_key``
    attribute (so every dict was mistaken for file data under
    Python 3); leftover debug print() calls were removed; file handles
    are now closed deterministically.
    """
    for name, value in dictionary.items():
        fullpath = os.path.join(directory, name)
        # Anything with an items() method is treated as a nested mapping;
        # everything else is written verbatim as file content.
        if hasattr(value, 'items'):
            os.mkdir(fullpath)
            dict2file(value, fullpath)
        else:
            with open(fullpath, 'wb') as handle:
                handle.write(value)
    return None
def recursive_dir_contents(dir):
    """Return the full paths of every entry (files and subdirectories)
    anywhere under `dir`.

    Fixed: the original used os.path.walk, which was removed in
    Python 3; os.walk yields the same entries (directory names plus
    file names per visited directory).
    """
    files = []
    for dirpath, dirnames, filenames in os.walk(dir):
        files.extend(os.path.join(dirpath, name)
                     for name in dirnames + filenames)
    return files
def count_dotdot(path):
    """Count the number of '..' components in `path`.

    Fixed: the original called len() on a filter object, which raises
    TypeError under Python 3; a generator sum avoids materialising the
    parts at all.
    """
    return sum(1 for part in path.split(os.sep) if part == '..')
def common_prefix(seq, default_empty=''):
    """Longest common prefix of the sequences in `seq`.

    Returns a slice of seq[0]; on a TypeError (e.g. `seq` is not a
    sequence of sequences) returns `default_empty` instead.
    """
    try:
        matched = 0
        # zip(*seq) walks the sequences column by column; stop at the
        # first column whose members are not all identical.
        for column in zip(*seq):
            if any(item != column[0] for item in column):
                break
            matched += 1
        return seq[0][:matched]
    except TypeError:
        return default_empty
def split_common_path(thePaths):
    """Split a list of path strings into (common base, per-path remainders).

    Example: ['/a/b/c', '/a/b/d'] -> ('/a/b', ['c', 'd']).

    Fixed for Python 3: the original chained lazy map() objects, which
    common_prefix cannot index (TypeError -> empty prefix) and which
    are exhausted after a single pass; everything is now materialised
    as lists.
    """
    # sanitize paths:
    sanitized = [os.path.normpath(os.path.expanduser(p)) for p in thePaths]
    # split each path into its components
    split_paths = [p.split(os.sep) for p in sanitized]
    # chop common part off the paths
    theBase = common_prefix(split_paths, [])
    remainders = [parts[len(theBase):] for parts in split_paths]
    # convert back to strings; an absolute root leaves a lone '' component
    if theBase == ['']:
        theBase = '/'
    else:
        theBase = os.sep.join(theBase)
    return (theBase, [os.sep.join(parts) for parts in remainders])
def mkdir_parents(path):
    """Create `path` and any missing ancestor directories (like mkdir -p).

    Existing directories are left untouched.  If a non-directory already
    occupies one of the positions, os.chdir() is invoked on it purely to
    raise the appropriate OSError for that path.
    """
    # dirtree returns leaf-first; reverse so we create top-down.
    tree = dirtree(path)
    tree.reverse()
    for parent in tree:
        if os.path.exists(parent):
            if os.path.isdir(parent):
                continue
            else:
                # This will raise the correct OSError for us.
                os.chdir(parent)
        else:
            os.mkdir(parent)
def dirtree(dir):
# sanitize path:
dir = os.path.normpath(os.path.expanduser(dir))
return _dirtree(dir)
def _dirtree(dir):
"""
An example will explain:
>>> elmsubmit_misc.dirtree('/hof/wim/sif/eff/hoo')
['/hof/wim/sif/eff/hoo',
'/hof/wim/sif/eff',
'/hof/wim/sif',
'/hof/wim',
'/hof',
'/']
"""
# POSIX allows // or / for the root dir.
# And it seems the rules say you aren't allowed to collapse // into /.
# I don't know why this is!
if dir == '//' or dir == '/':
return [dir]
elif dir == '':
return []
else:
return [dir] + _dirtree(os.path.dirname(dir))
def provide_dir_with_perms_then_exec(dir, function, perms, barrier_dir):
    """Temporarily ensure user bits `perms` on `dir` (opening parent
    directories as needed, but never above `barrier_dir`), run
    `function`, then restore every permission that was changed.

    Returns whatever `function` returns.  Only directories owned by
    this process's effective uid are ever chmodded; anything else
    raises OSError.

    NOTE(review): the 0300-style octal literal below is Python 2 syntax
    (0o300 in Python 3), consistent with this legacy module.
    """
    # This function won't allow you to alter the root directories'
    # permissions: if your going to be changing the permissions on
    # your root directory, you probably need to do it more carefully
    # than with a python function!

    # sanitize path:
    dir = os.path.abspath(os.path.normpath(os.path.expanduser(dir)))

    # Check to see if we're already in the state we want to be in:
    try:
        targets_current_perms = get_perms(dir)
        targets_current_owner_uid = get_owner_uid(dir)
    except OSError as e:
        if e.errno == 2:
            # dir definitely doesn't exist.
            raise
        elif e.errno == 13:
            # don't have sufficient permissions to read the
            # permissions.
            dir_info_read = False
    else:
        dir_info_read = True

    if dir_info_read and targets_current_owner_uid != os.geteuid():
        # We don't own the file:
        raise OSError("file %s not owned by this process's effective user: cannot proceed" % (dir))
    elif dir_info_read and targets_current_perms & perms == perms:
        # This directory already has user bits set to at least perms,
        # so execute the given function:
        return function()

    # If we haven't exited the function already, we need to change the target dirs
    # permissions (or simply couldn't read the permissions!)

    # Get a list of all of the dirs parents:
    dir_list = dirtree(dir)

    if barrier_dir is not None:
        # sanitize path:
        barrier_dir = os.path.abspath(os.path.normpath(os.path.expanduser(barrier_dir)))

        # Check the barrier dir is one of the parents of dir:
        if not barrier_dir in dir_list[1:]:
            raise ValueError('argument barrier_dir must be a proper parent directory of argument dir')

        # Get a list of all the directories that lie between the
        # barrier dir and the target dir, including the barrier dir,
        # but excluding the target dir:
        barrier_dir_list = dirtree(barrier_dir)
        g = lambda d: (d == barrier_dir) or (not (d in barrier_dir_list or d == dir))
        operable_parent_dirs = filter(g, dir_list)
    else:
        operable_parent_dirs = dir_list

    # Make sure we have at least wx permissions on parent:
    parents_old_states = _get_perms_on(operable_parent_dirs, perms=0300)

    # Now stat the target dir if we didn't manage previously:
    if not dir_info_read:
        try:
            targets_current_perms = get_perms(dir)
            targets_current_owner_uid = get_owner_uid(dir)
        except OSError as e:
            if e.errno == 2:
                # race condition:
                raise OSError("Directory structure altered during processing: %s removed during processing" % (dir))
            elif e.errno == 13:
                # race condition:
                raise OSError("Directory structure %s altered during processing: permissions changed during processing" % (dir_list))

        if targets_current_owner_uid != os.geteuid():
            # We don't own this file and so can't chmod it: We
            # couldn't see this previously because we didn't
            # have permission to stat the dir. Undo the
            # permission changes we've already made and report
            # the error:
            _safely_chmod_dirlist(parents_old_states)
            raise OSError("file %s not owned by this process's effective user: cannot proceed" % (dir))
        elif targets_current_perms & perms == perms:
            # We already have the perms we need.
            try:
                return_value = function()
            finally:
                _safely_chmod_dirlist(parents_old_states)
            return return_value

    # Now change the permissions of our target directory:
    try:
        os.chmod(dir, perms | targets_current_perms)
    except OSError:
        # race condition:
        raise OSError("Directory structure %s altered during processing: permissions changed during processing" % (dir_list))

    try:
        # Now permissions are open, exec our function:
        return_value = function()
    finally:
        # Close up the permissions we had to open:
        _safely_chmod_dirlist([[dir, targets_current_perms]] + parents_old_states)

    # Return the input functions return value:
    return return_value
def _get_perms_on(dirlist, perms=0300):
    """Walk `dirlist` (leaf first) until some directory can be chmodded
    to include user bits `perms`; return [(path, old_mode), ...] for
    every directory whose permissions were changed, so the caller can
    restore them with _safely_chmod_dirlist().

    NOTE(review): 0300-style octal literals are Python 2 syntax,
    consistent with this legacy module.
    """
    # Note: any comment labelling a particular error as "race
    # condition" is meant to indicate an error that can only arise if
    # another process is attempting to alter the directory strucutre
    # at the same time as us - this function _must not_ be used if
    # such a situation is possible.

    # User perms < rx doesn't make sense for this function. You need
    # at least wx bits on a directory to change the permissions on its
    # child directories.
    if perms < 0300: raise ValueError("argument perms must be >= 3 in the user byte")

    dir = dirlist[0]
    remaining_dirs = dirlist[1:]

    try:
        targets_current_perms = get_perms(dir)
        targets_current_owner_uid = get_owner_uid(dir)
    except OSError as e:
        if e.errno == 2:
            # dir definitely doesn't exist.
            raise
        elif e.errno == 13:
            # don't have sufficient permissions to read the
            # permissions.
            dir_info_read = False
    else:
        dir_info_read = True

    if dir_info_read and targets_current_owner_uid != os.geteuid():
        # We don't own the file:
        raise OSError("file %s not owned by this process's effective user: cannot proceed" % (dir))
    elif dir_info_read and targets_current_perms & perms == perms:
        # This directory already has user bits set to at least perms,
        # so nothing to do:
        return []
    elif dir_info_read and targets_current_perms & perms != perms:
        # We need to adjust the permissions. See if the parent will
        # let us:
        if remaining_dirs == []:
            # We have no parents available:
            raise OSError("no members of the given dirtree have sufficient permissions for us to chmod")
        else:
            parent = remaining_dirs[0]
            # Figure out if we're the owner of the parent and have permissions
            try:
                parents_current_perms = get_perms(parent)
                parents_current_owner_uid = get_owner_uid(parent)
            except OSError as e:
                if e.errno == 2:
                    # dir definitely doesn't exist.
                    raise
                elif e.errno == 13:
                    # don't have sufficient permissions to read the
                    # permissions.
                    parent_dir_info_read = False
            else:
                parent_dir_info_read = True

            if parent_dir_info_read and parents_current_owner_uid == os.geteuid() and parents_current_perms & 0300 == 0300:
                # We own the parent and have sufficient permission to chmod its contents:
                try:
                    os.chmod(dir, perms | targets_current_perms)
                except OSError:
                    # race condition:
                    raise OSError("Directory structure %s altered during processing: permissions changed during processing" % (dirlist))
                return [[dir, targets_current_perms]]
            else:
                # We need to step down a level:
                pass
    else: # dir info was not read.
        if remaining_dirs == []:
            raise OSError("no members of the given dirtree have sufficient permissions for us to chmod")

    # If the prior if-then-else didn't return or throw an error then
    # either we couldn't stat the given dir or we don't have
    # permission to change its permissions, so therefore we need to
    # step down a level:
    parents_old_states = _get_perms_on(remaining_dirs, perms)

    if not dir_info_read:
        try:
            targets_current_perms = get_perms(dir)
            targets_current_owner_uid = get_owner_uid(dir)
        except OSError as e:
            if e.errno == 2:
                # race condition:
                raise OSError("Directory structure altered during processing: %s removed during processing" % (dir))
            elif e.errno == 13:
                # race condition:
                raise OSError("Directory structure %s altered during processing: permissions changed during processing" % (dirlist))

        if targets_current_owner_uid != os.geteuid():
            # We don't own this file and so can't chmod it: We
            # couldn't see this previously because we didn't
            # have permission to stat the dir. Undo the
            # permission changes we've already made and report
            # the error:
            _safely_chmod_dirlist(parents_old_states)
            raise OSError("file %s not owned by this process's effective user: cannot proceed" % (dir))
        elif targets_current_perms & perms == perms:
            # current directory already has the permissions we
            # want; previously the parent's perms were preventing
            # us from seeing this:
            return parents_old_states
        else:
            # current directory's permissions need altering:
            # Set the user bits to at least perms:
            try:
                os.chmod(dir, perms | targets_current_perms)
            except OSError:
                # race condition:
                raise OSError("Directory structure %s altered during processing: permissions changed during processing" % (dirlist))
            return [[dir, targets_current_perms]] + parents_old_states
    else:
        # current directory's permissions need altering:
        # Set the user bits to at least perms:
        try:
            os.chmod(dir, perms | targets_current_perms)
        except OSError:
            # race condition:
            raise OSError("Directory structure %s altered during processing: permissions changed during processing" % (dirlist))
        return [[dir, targets_current_perms]] + parents_old_states
def _safely_chmod_dirlist(dirlist):
    # Restore saved permissions: dirlist is [(path, mode), ...] as recorded
    # by _get_perms_on; os.chmod returns None, so the resulting list is
    # evaluated purely for its side effects.
    return [os.chmod(dir, perms) for dir, perms in dirlist]
def get_perms(path):
    # Permission bits (mode & 07777) of path; follows symlinks.
    return stat.S_IMODE(os.stat(path)[stat.ST_MODE])
def get_owner_uid(path):
    # Numeric uid of the file's owner; follows symlinks.
    return os.stat(path)[stat.ST_UID]
# Text utils:
def wrap_text(text, cols=80):
    """Re-wrap each paragraph of `text` to at most `cols` columns.

    Paragraphs are runs of non-blank lines separated by blank-line
    whitespace; the separating whitespace is preserved verbatim.

    Fixed: leftover debug print() calls removed, and the split pattern
    now captures the *whole* inter-paragraph gap — a repeated capture
    group (the old `(\\n(?:\\s*\\n))+`) only captures its final
    repetition, which could silently drop blank lines on re-joining.
    """
    # A capturing split yields [para, sep, para, sep, ..., para].
    parts = re.split(r'(\n\s*\n)', text)
    # Wrap the paragraphs (even slots) in place; separators stay untouched.
    parts[0::2] = [textwrap.fill(chunk, width=cols) for chunk in parts[0::2]]
    return ''.join(parts)
# Module utils:
def import_dots(string):
    """
    Note that if you execute:

    mod = __import__('one.two.three')

    then variable mod will point to module one, not module
    'one.two.three'.

    whereas:

    mod = import_dots('one.two.three')

    will point to module 'one.two.three'.
    """
    module = __import__(string)
    # __import__ hands back the top-level package; descend through the
    # remaining dotted components to reach the requested leaf module.
    for attribute in string.split('.')[1:]:
        module = getattr(module, attribute)
    return module
| gpl-2.0 |
bealdav/OCB | addons/base_iban/__init__.py | 447 | 1073 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_iban
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jobscore/sync-engine | migrations/versions/082_event_participants.py | 9 | 1401 | """event participants
Revision ID: 1322d3787305
Revises: 4e3e8abea884
Create Date: 2014-08-15 20:53:36.656057
"""
# revision identifiers, used by Alembic.
revision = '1322d3787305'
down_revision = '1bc2536b8bc6'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``eventparticipant`` table (one row per event attendee)."""
    op.create_table(
        'eventparticipant',
        sa.Column('id', sa.Integer(), nullable=False),
        # public_id is a 16-byte binary identifier (exposed via the API).
        sa.Column('public_id', sa.BINARY(length=16), nullable=False),
        sa.Column('event_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        # Raw vs. canonicalized form of the participant's email address.
        sa.Column('_raw_address', sa.String(length=191), nullable=True),
        sa.Column('_canonicalized_address', sa.String(length=191),
                  nullable=True),
        # RSVP state; new participants default to 'awaiting'.
        sa.Column('status', sa.Enum('yes', 'no', 'maybe', 'awaiting'),
                  default='awaiting', nullable=False),
        sa.Column('notes', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('deleted_at', sa.DateTime(), nullable=True),
        # Participants are removed together with their event.
        sa.ForeignKeyConstraint(['event_id'], ['event.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        # An address may appear at most once per event.
        sa.UniqueConstraint('_raw_address', 'event_id', name='uid'))
def downgrade():
    """Reverse of :func:`upgrade`: drop the ``eventparticipant`` table."""
    op.drop_table('eventparticipant')
| agpl-3.0 |
kunalghosh/T-61.5060-Algorithmic-Methods-of-Data-Mining | source/approximate/f2_new2_2.py | 1 | 6873 | from __future__ import print_function
import sys
import numpy as np
import cPickle as pkl
import pprint
RAND_NUMS = 100000000
def getEstimatedF2(counts, n):
    """Return the F2 estimate: the mean of ``n * (2*c - 1)`` over all counts.

    ``counts`` are the per-slot occurrence counts from the reservoir and
    ``n`` is the total number of stream elements observed.
    """
    per_slot_estimates = n * (2 * np.asarray(counts) - 1)
    return np.mean(per_slot_estimates)
# # def get_next_rand(rands):
# retVal = -1
# try:
# retVal = rands.pop()
# # retVal = rands[-1]
# # rands = rands[:-1]
# except IndexError as e:
# rands.extend(np.random.rand(RAND_NUMS))
# retVal = rands.pop()
# # rands = np.random.rand(RAND_NUMS)
# # retVal = rands[-1]
# # rands = rands[:-1]
# return retVal
def get_next_rand(rands):
    """Pop one uniform random number from the pre-generated pool.

    Refills the pool with ``RAND_NUMS`` fresh draws when it is empty.
    Returns ``(value, remaining_pool)`` so the caller can rebind its pool.
    """
    if len(rands) == 0:
        rands = np.random.rand(RAND_NUMS)
    value = rands[-1]
    return value, rands[:-1]
# def get_rand_idxes(rand_idxes,low,high):
# retVal = -1
# try:
# retVal = rand_idxes[(low,high)].pop()
# except IndexError as e:
# rand_idxes[(low,high)].extend(np.random.randint(low,high,RAND_NUMS))
# except KeyError as e:
# rand_idxes[(low,high)] = list(np.random.randint(low,high,RAND_NUMS))
# finally:
# retVal = rand_idxes[(low,high)].pop()
# return retVal
# def get_rand_idxes(rand_idxes,low,high):
# retVal = -1
# try:
# retVal = rand_idxes.pop()
# # retVal = rand_idxes[-1]
# # rand_idxes = rand_idxes[:-1]
# except IndexError as e:
# rand_idxes.extend(np.random.randint(low,high,RAND_NUMS))
# rand_idxes.pop()
# # rand_idxes = np.random.randint(low,high,RAND_NUMS)
# # retVal = rand_idxes[-1]
# # rand_idxes = rand_idxes[:-1]
# return retVal
# def get_rand_idxes(rand_idxes,low,high):
# retVal = -1
# if len(rand_idxes) == 0:
# rand_idxes = np.random.randint(low,high,RAND_NUMS)
# retVal = rand_idxes[-1]
# rand_idxes = rand_idxes[:-1]
# return retVal, rand_idxes
if __name__ == "__main__":
    # Streaming AMS-style estimator for the second frequency moment (F2)
    # of a stream of lines read from stdin, using an S-slot reservoir.
    rands = []
    rand_idxes = []
    outputFile = "f2_values_new_2Jan_new.txt"
    dataPkl = 'data.pkl'
    # NOTE(review): dataPkl is never used below -- dead configuration?
    S = 4100000
    # S = 200
    # S = 10
    # CLI: one argument is either S (if it parses as int) or the output
    # file name; two arguments are S followed by the output file name.
    if len(sys.argv) == 2:
        try:
            S = int(sys.argv[1])
        except ValueError as e:
            outputFile = sys.argv[1]
    if len(sys.argv) == 3:
        S = int(sys.argv[1])
        outputFile = sys.argv[2]
    X_listIdx = {} # Stores {line : [Idx in X_xcount where line appears]}
    X_xcount = [] # Stores [line,count] (lines can be duplicates)
    # NOTE(review): X_listIdx is only populated for the first S elements and
    # never read afterwards (the code that used it is commented out below).
    n = -1  # index of the current stream element, so n+1 = elements seen
    counter = 0  # unpicked lines seen since the last batch flush
    batch_updater = {}  # pending {line: extra_count} applied to X_xcount in bulk
    for line in sys.stdin:
        n += 1
        if n < S: # pick the first S elements of the stream
            X_xcount.append([line, 1]) # line and its count
            if line in X_listIdx:
                X_listIdx[line].append(n) # store the index of the element
            else:
                X_listIdx[line] = [n]
        else:
            prob = (S*1.0)/n # probability of picking the n'th element
            # Inlined get_next_rand(): pop a uniform random number from the
            # pre-generated pool, refilling it when it runs dry.
            try:
                randVal = rands[-1]
                rands = rands[:-1]
            except IndexError as e:
                rands = np.random.rand(RAND_NUMS)
                randVal = rands[-1]
                rands = rands[:-1]
            if randVal < prob: # with probability S/n the new line is kept
                # Replace a uniformly random reservoir slot with the new
                # line at count 1 (inlined get_rand_idxes: pop a random
                # index in [0, S) from the pre-generated pool).
                try:
                    idx_to_del = rand_idxes[-1]
                    rand_idxes = rand_idxes[:-1]
                except IndexError as e:
                    rand_idxes = np.random.randint(0,S,RAND_NUMS)
                    idx_to_del = rand_idxes[-1]
                    rand_idxes = rand_idxes[:-1]
                # (An older, commented-out strategy deleted slots via the
                # X_listIdx bookkeeping instead; removed for readability.)
                X_xcount[idx_to_del] = [line, 1]
            else:
                # The line was not picked: every slot currently holding this
                # line should have its count incremented.  Increments are
                # buffered in batch_updater and applied to X_xcount in bulk
                # every 500 unpicked lines (scanning X_xcount is expensive).
                counter += 1
                if counter != 500:
                    if line in batch_updater:
                        batch_updater[line] += 1
                    else:
                        batch_updater[line] = 1
                else:
                    # NOTE(review): on the flush iteration the *current*
                    # line's increment is never recorded, so one update in
                    # every 500 is silently dropped.  The loop also rebinds
                    # ``line`` (harmless: the outer for reassigns it first).
                    for idx,line in enumerate(X_xcount):
                        line_str = line[0]
                        if line_str in batch_updater:
                            X_xcount[idx][1] += batch_updater[line_str]
                    batch_updater = {}
                    counter = 0
    # NOTE(review): increments still buffered in batch_updater (up to 499
    # per line) are never flushed before the estimates below are computed.
    strs,counts = zip(*X_xcount)
    counts = np.asarray(counts)
    vals = (n+1) * (2*counts-1)  # per-slot AMS estimates of F2
    np.savetxt(outputFile, vals, fmt="%d")
    # zip(...)[1] relies on Python 2 semantics (zip returns a list); the
    # cPickle import at the top of the file indicates this targets Python 2.
    print("{}".format(int(getEstimatedF2(zip(*X_xcount)[1], n+1))))
| mit |
jpush/jbox | Server/venv/lib/python3.5/site-packages/sqlalchemy/event/attr.py | 55 | 12053 | # event/attr.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Attribute implementation for _Dispatch classes.
The various listener targets for a particular event class are represented
as attributes, which refer to collections of listeners to be fired off.
These collections can exist at the class level as well as at the instance
level. An event is fired off using code like this::
some_object.dispatch.first_connect(arg1, arg2)
Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and
``first_connect`` is typically an instance of ``_ListenerCollection``
if event listeners are present, or ``_EmptyListener`` if none are present.
The attribute mechanics here spend effort trying to ensure listener functions
are available with a minimum of function call overhead, that unnecessary
objects aren't created (i.e. many empty per-instance listener collections),
as well as that everything is garbage collectable when owning references are
lost. Other features such as "propagation" of listener functions across
many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances,
as well as support for subclass propagation (e.g. events assigned to
``Pool`` vs. ``QueuePool``) are all implemented here.
"""
from __future__ import absolute_import, with_statement
from .. import util
from ..util import threading
from . import registry
from . import legacy
from itertools import chain
import weakref
import collections
class RefCollection(util.MemoizedSlots):
    # Mixin giving each listener collection a lazily created weakref to
    # itself; the registry's callback cleans up when the collection is GC'ed.
    __slots__ = 'ref',
    def _memoized_attr_ref(self):
        # Memoized by util.MemoizedSlots: computed on first access of ``self.ref``.
        return weakref.ref(self, registry._collection_gced)
class _ClsLevelDispatch(RefCollection):
    """Class-level events on :class:`._Dispatch` classes."""
    __slots__ = ('name', 'arg_names', 'has_kw',
                 'legacy_signatures', '_clslevel', '__weakref__')
    def __init__(self, parent_dispatch_cls, fn):
        # ``fn`` is the event prototype function on the event interface;
        # its signature (minus self) defines the event's argument names.
        self.name = fn.__name__
        argspec = util.inspect_getargspec(fn)
        self.arg_names = argspec.args[1:]
        self.has_kw = bool(argspec.keywords)
        # Legacy signatures, sorted newest-version-first.
        self.legacy_signatures = list(reversed(
            sorted(
                getattr(fn, '_legacy_signatures', []),
                key=lambda s: s[0]
            )
        ))
        fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn)
        # Per-owning-class listener deques, keyed weakly by the class.
        self._clslevel = weakref.WeakKeyDictionary()
    def _adjust_fn_spec(self, fn, named):
        # Wrap a listener so it can be invoked with this event's calling
        # convention (keyword style and/or a legacy signature).
        if named:
            fn = self._wrap_fn_for_kw(fn)
        if self.legacy_signatures:
            try:
                argspec = util.get_callable_argspec(fn, no_self=True)
            except TypeError:
                pass
            else:
                fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
        return fn
    def _wrap_fn_for_kw(self, fn):
        # Adapt a listener so positional event args are passed as keywords.
        def wrap_kw(*args, **kw):
            argdict = dict(zip(self.arg_names, args))
            argdict.update(kw)
            return fn(**argdict)
        return wrap_kw
    def insert(self, event_key, propagate):
        """Prepend the listener on ``target`` and all current subclasses."""
        target = event_key.dispatch_target
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                # Unseen subclass: seed its deque from its MRO (``target``
                # was processed first, so the new listener is included).
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = collections.deque()
                self._clslevel[cls].appendleft(event_key._listen_fn)
        registry._stored_in_collection(event_key, self)
    def append(self, event_key, propagate):
        """Append the listener on ``target`` and all current subclasses."""
        target = event_key.dispatch_target
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = collections.deque()
                self._clslevel[cls].append(event_key._listen_fn)
        registry._stored_in_collection(event_key, self)
    def update_subclass(self, target):
        """Seed ``target``'s listener deque from its base classes' deques."""
        if target not in self._clslevel:
            self._clslevel[target] = collections.deque()
        clslevel = self._clslevel[target]
        for cls in target.__mro__[1:]:
            if cls in self._clslevel:
                clslevel.extend([
                    fn for fn
                    in self._clslevel[cls]
                    if fn not in clslevel
                ])
    def remove(self, event_key):
        """Remove the listener from ``target`` and all its subclasses."""
        target = event_key.dispatch_target
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls in self._clslevel:
                self._clslevel[cls].remove(event_key._listen_fn)
        registry._removed_from_collection(event_key, self)
    def clear(self):
        """Clear all class level listeners"""
        to_clear = set()
        for dispatcher in self._clslevel.values():
            to_clear.update(dispatcher)
            dispatcher.clear()
        registry._clear(self, to_clear)
    def for_modify(self, obj):
        """Return an event collection which can be modified.
        For _ClsLevelDispatch at the class level of
        a dispatcher, this returns self.
        """
        return self
class _InstanceLevelDispatch(RefCollection):
    """Abstract base for per-instance listener collections."""
    __slots__ = ()
    def _adjust_fn_spec(self, fn, named):
        # ``self.parent`` (a _ClsLevelDispatch) is supplied by subclasses.
        return self.parent._adjust_fn_spec(fn, named)
class _EmptyListener(_InstanceLevelDispatch):
    """Serves as a proxy interface to the events
    served by a _ClsLevelDispatch, when there are no
    instance-level events present.
    Is replaced by _ListenerCollection when instance-level
    events are added.
    """
    # Shared immutable defaults: an empty listener has no instance listeners.
    propagate = frozenset()
    listeners = ()
    __slots__ = 'parent', 'parent_listeners', 'name'
    def __init__(self, parent, target_cls):
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        self.parent = parent  # _ClsLevelDispatch
        self.parent_listeners = parent._clslevel[target_cls]
        self.name = parent.name
    def for_modify(self, obj):
        """Return an event collection which can be modified.
        For _EmptyListener at the instance level of
        a dispatcher, this generates a new
        _ListenerCollection, applies it to the instance,
        and returns it.
        """
        result = _ListenerCollection(self.parent, obj._instance_cls)
        if getattr(obj, self.name) is self:
            setattr(obj, self.name, result)
        else:
            # Someone else already replaced us; it must be a joined listener.
            assert isinstance(getattr(obj, self.name), _JoinedListener)
        return result
    def _needs_modify(self, *args, **kw):
        raise NotImplementedError("need to call for_modify()")
    # All mutating operations require upgrading via for_modify() first.
    exec_once = insert = append = remove = clear = _needs_modify
    def __call__(self, *args, **kw):
        """Execute this event."""
        for fn in self.parent_listeners:
            fn(*args, **kw)
    def __len__(self):
        return len(self.parent_listeners)
    def __iter__(self):
        return iter(self.parent_listeners)
    def __bool__(self):
        return bool(self.parent_listeners)
    __nonzero__ = __bool__  # Python 2 spelling of __bool__
class _CompoundListener(_InstanceLevelDispatch):
    """Base for collections that combine class-level and local listeners."""
    __slots__ = '_exec_once_mutex', '_exec_once'
    def _memoized_attr__exec_once_mutex(self):
        # Lock is created lazily on first exec_once() use (MemoizedSlots).
        return threading.Lock()
    def exec_once(self, *args, **kw):
        """Execute this event, but only if it has not been
        executed already for this collection."""
        # Double-checked locking: the flag is re-tested under the mutex.
        if not self._exec_once:
            with self._exec_once_mutex:
                if not self._exec_once:
                    try:
                        self(*args, **kw)
                    finally:
                        # Marked done even if a listener raised.
                        self._exec_once = True
    def __call__(self, *args, **kw):
        """Execute this event."""
        # Class-level listeners fire before instance-level ones.
        for fn in self.parent_listeners:
            fn(*args, **kw)
        for fn in self.listeners:
            fn(*args, **kw)
    def __len__(self):
        return len(self.parent_listeners) + len(self.listeners)
    def __iter__(self):
        return chain(self.parent_listeners, self.listeners)
    def __bool__(self):
        return bool(self.listeners or self.parent_listeners)
    __nonzero__ = __bool__  # Python 2 spelling of __bool__
class _ListenerCollection(_CompoundListener):
    """Instance-level attributes on instances of :class:`._Dispatch`.
    Represents a collection of listeners.
    As of 0.7.9, _ListenerCollection is only first
    created via the _EmptyListener.for_modify() method.
    """
    __slots__ = (
        'parent_listeners', 'parent', 'name', 'listeners',
        'propagate', '__weakref__')
    def __init__(self, parent, target_cls):
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        self._exec_once = False
        self.parent_listeners = parent._clslevel[target_cls]
        self.parent = parent
        self.name = parent.name
        self.listeners = collections.deque()
        # Subset of listeners flagged to propagate to joined dispatchers.
        self.propagate = set()
    def for_modify(self, obj):
        """Return an event collection which can be modified.
        For _ListenerCollection at the instance level of
        a dispatcher, this returns self.
        """
        return self
    def _update(self, other, only_propagate=True):
        """Populate from the listeners in another :class:`_Dispatch`
        object."""
        existing_listeners = self.listeners
        existing_listener_set = set(existing_listeners)
        self.propagate.update(other.propagate)
        # NOTE(review): by Python precedence this filter parses as
        # ((l not in set AND not only_propagate) OR l in self.propagate);
        # confirm that the OR grouping is the intended semantics.
        other_listeners = [l for l
                           in other.listeners
                           if l not in existing_listener_set
                           and not only_propagate or l in self.propagate
                           ]
        existing_listeners.extend(other_listeners)
        to_associate = other.propagate.union(other_listeners)
        registry._stored_in_collection_multi(self, other, to_associate)
    def insert(self, event_key, propagate):
        # Prepend; registry returns False if the key is already present.
        if event_key.prepend_to_list(self, self.listeners):
            if propagate:
                self.propagate.add(event_key._listen_fn)
    def append(self, event_key, propagate):
        # Append; registry returns False if the key is already present.
        if event_key.append_to_list(self, self.listeners):
            if propagate:
                self.propagate.add(event_key._listen_fn)
    def remove(self, event_key):
        self.listeners.remove(event_key._listen_fn)
        self.propagate.discard(event_key._listen_fn)
        registry._removed_from_collection(event_key, self)
    def clear(self):
        # Unregister before dropping local state.
        registry._clear(self, self.listeners)
        self.propagate.clear()
        self.listeners.clear()
class _JoinedListener(_CompoundListener):
    """Listener collection that chains a ``local`` collection with the
    listeners of a ``parent`` dispatcher attribute of the same name."""
    __slots__ = 'parent', 'name', 'local', 'parent_listeners'
    def __init__(self, parent, name, local):
        self._exec_once = False
        self.parent = parent
        self.name = name
        self.local = local
        # Local listeners fire first (they act as "parent_listeners" in
        # _CompoundListener.__call__); the parent's are read dynamically.
        self.parent_listeners = self.local
    @property
    def listeners(self):
        # Looked up live so later additions to the parent are seen.
        return getattr(self.parent, self.name)
    def _adjust_fn_spec(self, fn, named):
        return self.local._adjust_fn_spec(fn, named)
    def for_modify(self, obj):
        # Upgrade the local side in place and keep both aliases in sync.
        self.local = self.parent_listeners = self.local.for_modify(obj)
        return self
    def insert(self, event_key, propagate):
        # Mutations apply to the local collection only.
        self.local.insert(event_key, propagate)
    def append(self, event_key, propagate):
        self.local.append(event_key, propagate)
    def remove(self, event_key):
        self.local.remove(event_key)
    def clear(self):
        raise NotImplementedError()
| mit |
liardety/pratical | pratical2/TP2/build/ThirdParty/src/googletest/test/gtest_list_tests_unittest.py | 1898 | 6515 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
  """Runs gtest_list_tests_unittest_ and returns the list of tests printed.

  Args:
    args: list of command-line flags to pass to the test binary.

  Returns:
    The child process's captured stdout (stderr is not captured).
  """
  return gtest_test_utils.Subprocess([EXE_PATH] + args,
                                     capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""
  def RunAndVerify(self, flag_value, expected_output_re, other_flag):
    """Runs gtest_list_tests_unittest_ and verifies that it prints
    the correct tests.

    Args:
      flag_value: value of the --gtest_list_tests flag;
          None if the flag should not be present.
      expected_output_re: regular expression that matches the expected
          output after running command; None means the output must NOT
          look like a test listing.
      other_flag: a different flag to be passed to command
          along with gtest_list_tests; None if absent.
    """
    # Translate flag_value into the actual flag string and a human-readable
    # description used in failure messages.
    if flag_value is None:
      flag = ''
      flag_expression = 'not set'
    elif flag_value == '0':
      flag = '--%s=0' % LIST_TESTS_FLAG
      flag_expression = '0'
    else:
      flag = '--%s' % LIST_TESTS_FLAG
      flag_expression = '1'
    args = [flag]
    if other_flag is not None:
      args += [other_flag]
    output = Run(args)
    if expected_output_re:
      self.assert_(
          expected_output_re.match(output),
          ('when %s is %s, the output of "%s" is "%s",\n'
           'which does not match regex "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
            expected_output_re.pattern)))
    else:
      # No listing expected: the output must not match the full listing.
      self.assert_(
          not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
          ('when %s is %s, the output of "%s" is "%s"'%
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(flag_value=None,
                      expected_output_re=None,
                      other_flag=None)
  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""
    # Explicitly disabled: no listing should be produced.
    self.RunAndVerify(flag_value='0',
                      expected_output_re=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag=None)
  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag='--gtest_break_on_failure')
  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
                      other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
  # Delegates to gtest's wrapper around unittest.main().
  gtest_test_utils.Main()
| gpl-2.0 |
huwei/wechat-python-sdk | wechat_sdk/messages.py | 20 | 4219 | # -*- coding: utf-8 -*-
# From: https://github.com/whtsky/WeRoBot/blob/develop/werobot/messages.py
from .exceptions import ParseError
MESSAGE_TYPES = {}
def handle_for_type(type):
    """Decorator factory: register the decorated message class in
    ``MESSAGE_TYPES`` under the given WeChat message *type* string."""
    def _register(message_cls):
        MESSAGE_TYPES[type] = message_cls
        return message_cls
    return _register
class WechatMessage(object):
    """Base class for parsed WeChat messages.

    Pops the common envelope fields (MsgId, ToUserName, FromUserName,
    CreateTime) out of *message* and exposes every remaining key as an
    instance attribute.
    """
    def __init__(self, message):
        raw_id = message.pop('MsgId', 0)
        raw_time = message.pop('CreateTime', 0)
        self.id = int(raw_id)
        self.target = message.pop('ToUserName', None)
        self.source = message.pop('FromUserName', None)
        self.time = int(raw_time)
        # Whatever is left becomes plain attributes on the instance.
        self.__dict__.update(message)
@handle_for_type('text')
class TextMessage(WechatMessage):
    # Plain text message; ``Content`` defaults to '' when absent.
    def __init__(self, message):
        self.content = message.pop('Content', '')
        super(TextMessage, self).__init__(message)
@handle_for_type('image')
class ImageMessage(WechatMessage):
    # Image message; PicUrl and MediaId are both mandatory.
    def __init__(self, message):
        try:
            self.picurl = message.pop('PicUrl')
            self.media_id = message.pop('MediaId')
        except KeyError:
            # Missing mandatory field => malformed payload.
            raise ParseError()
        super(ImageMessage, self).__init__(message)
@handle_for_type('video')
class VideoMessage(WechatMessage):
    # Video message; MediaId and ThumbMediaId are both mandatory.
    def __init__(self, message):
        try:
            self.media_id = message.pop('MediaId')
            self.thumb_media_id = message.pop('ThumbMediaId')
        except KeyError:
            raise ParseError()
        super(VideoMessage, self).__init__(message)
@handle_for_type('shortvideo')
class ShortVideoMessage(WechatMessage):
    # Short-video message; structurally identical to VideoMessage, kept as
    # a separate class so it registers under the 'shortvideo' type.
    def __init__(self, message):
        try:
            self.media_id = message.pop('MediaId')
            self.thumb_media_id = message.pop('ThumbMediaId')
        except KeyError:
            raise ParseError()
        super(ShortVideoMessage, self).__init__(message)
@handle_for_type('location')
class LocationMessage(WechatMessage):
    # Location message; exposes ``location`` as an (x, y) float tuple.
    def __init__(self, message):
        try:
            location_x = message.pop('Location_X')
            location_y = message.pop('Location_Y')
            self.location = (float(location_x), float(location_y))
            self.scale = int(message.pop('Scale'))  # map zoom level
            self.label = message.pop('Label')
        except KeyError:
            raise ParseError()
        super(LocationMessage, self).__init__(message)
@handle_for_type('link')
class LinkMessage(WechatMessage):
    # Shared-link message; Title, Description and Url are all mandatory.
    def __init__(self, message):
        try:
            self.title = message.pop('Title')
            self.description = message.pop('Description')
            self.url = message.pop('Url')
        except KeyError:
            raise ParseError()
        super(LinkMessage, self).__init__(message)
@handle_for_type('event')
class EventMessage(WechatMessage):
    # Event push message; the fields present depend on the Event subtype.
    def __init__(self, message):
        # Discard the lowercase 'type' key -- presumably injected by the
        # XML-parsing layer before dispatch; TODO confirm against caller.
        message.pop('type')
        try:
            self.type = message.pop('Event').lower()
            if self.type == 'subscribe' or self.type == 'scan':
                # EventKey/Ticket are optional for subscribe/scan events.
                self.key = message.pop('EventKey', None)
                self.ticket = message.pop('Ticket', None)
            elif self.type in ['click', 'view', 'scancode_push', 'scancode_waitmsg',
                               'pic_sysphoto', 'pic_photo_or_album', 'pic_weixin', 'location_select']:
                # Menu events always carry an EventKey.
                self.key = message.pop('EventKey')
            elif self.type == 'location':
                self.latitude = float(message.pop('Latitude'))
                self.longitude = float(message.pop('Longitude'))
                self.precision = float(message.pop('Precision'))
            elif self.type == 'templatesendjobfinish':
                self.status = message.pop('Status')
        except KeyError:
            raise ParseError()
        super(EventMessage, self).__init__(message)
@handle_for_type('voice')
class VoiceMessage(WechatMessage):
    # Voice message; Recognition (speech-to-text result) is optional.
    def __init__(self, message):
        try:
            self.media_id = message.pop('MediaId')
            self.format = message.pop('Format')
            self.recognition = message.pop('Recognition', None)
        except KeyError:
            raise ParseError()
        super(VoiceMessage, self).__init__(message)
class UnknownMessage(WechatMessage):
    # Fallback for message types not present in MESSAGE_TYPES.
    def __init__(self, message):
        self.type = 'unknown'
        super(UnknownMessage, self).__init__(message)
| bsd-2-clause |
pavelchristof/gomoku-ai | tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py | 41 | 6317 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormalFullCovariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(42)
class MultivariateNormalFullCovarianceTest(test.TestCase):
  """Tests MultivariateNormalFullCovariance: validation, pdf, shapes, KL."""
  def _random_pd_matrix(self, *shape):
    # Random positive-definite matrix built as chol @ chol^T, where chol is
    # lower-triangular with a softplus-transformed (positive) diagonal.
    mat = rng.rand(*shape)
    chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
    chol = array_ops.matrix_band_part(chol, -1, 0)
    return math_ops.matmul(chol, chol, adjoint_b=True).eval()
  def testRaisesIfInitializedWithNonSymmetricMatrix(self):
    with self.test_session():
      mu = [1., 2.]
      sigma = [[1., 0.], [1., 1.]]  # Nonsingular, but not symmetric
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
      with self.assertRaisesOpError("not symmetric"):
        # Validation fires when the covariance op is actually evaluated.
        mvn.covariance().eval()
  def testNamePropertyIsSetByInitArg(self):
    with self.test_session():
      mu = [1., 2.]
      sigma = [[1., 0.], [0., 1.]]
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name="Billy")
      self.assertEqual(mvn.name, "Billy")
  def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):
    with self.test_session():
      mu = rng.rand(10)
      sigma = self._random_pd_matrix(10, 10)
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
      # Should not raise
      mvn.covariance().eval()
  def testLogPDFScalarBatch(self):
    # Compare log_prob/prob against scipy's multivariate_normal.
    with self.test_session():
      mu = rng.rand(2)
      sigma = self._random_pd_matrix(2, 2)
      mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
      x = rng.rand(2)
      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)
      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((), log_pdf.get_shape())
      self.assertEqual((), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())
  def testLogPDFScalarBatchCovarianceNotProvided(self):
    # Omitting covariance_matrix should default to the identity matrix.
    with self.test_session():
      mu = rng.rand(2)
      mvn = ds.MultivariateNormalFullCovariance(
          mu, covariance_matrix=None, validate_args=True)
      x = rng.rand(2)
      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)
      # Initialize a scipy_mvn with the default covariance.
      scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))
      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((), log_pdf.get_shape())
      self.assertEqual((), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())
  def testShapes(self):
    with self.test_session():
      mu = rng.rand(3, 5, 2)
      covariance = self._random_pd_matrix(3, 5, 2, 2)
      mvn = ds.MultivariateNormalFullCovariance(
          mu, covariance, validate_args=True)
      # Shapes known at graph construction time.
      self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
      self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
      # Shapes known at runtime.
      self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
      self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
  def _random_mu_and_sigma(self, batch_shape, event_shape):
    # This ensures sigma is positive def.
    mat_shape = batch_shape + event_shape + event_shape
    mat = rng.randn(*mat_shape)
    perm = np.arange(mat.ndim)
    perm[-2:] = [perm[-1], perm[-2]]
    sigma = np.matmul(mat, np.transpose(mat, perm))
    mu_shape = batch_shape + event_shape
    mu = rng.randn(*mu_shape)
    return mu, sigma
  def testKLBatch(self):
    # Batched KL divergence must match per-element numpy computations.
    batch_shape = (2,)
    event_shape = (3,)
    with self.test_session():
      mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
      mvn_a = ds.MultivariateNormalFullCovariance(
          loc=mu_a,
          covariance_matrix=sigma_a,
          validate_args=True)
      mvn_b = ds.MultivariateNormalFullCovariance(
          loc=mu_b,
          covariance_matrix=sigma_b,
          validate_args=True)
      kl = ds.kl_divergence(mvn_a, mvn_b)
      self.assertEqual(batch_shape, kl.get_shape())
      kl_v = kl.eval()
      expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
                                            mu_b[0, :], sigma_b[0, :])
      expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
                                            mu_b[1, :], sigma_b[1, :])
      self.assertAllClose(expected_kl_0, kl_v[0])
      self.assertAllClose(expected_kl_1, kl_v[1])
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
  # Runs all TestCase classes in this module via TensorFlow's test runner.
  test.main()
| apache-2.0 |
nat13ejo/garn | garn/wire_3d.py | 1 | 9734 | import kwant
from math import sqrt
from matplotlib import pyplot
import numpy as np
from garn.geometry import hexagon, extension
from garn.system_wide import Wire
class Wire3D(Wire):
    """Wire3D facilitates the modelling of nanowire contact geometries in
    Kwant by acting as a help in constructing a hexagonal nanowire
    and attaching customizable contacts in each end.
    """

    def __init__(self, base=3, wire_length=30, lead_length=5,
                 identifier="unnamed", file_name="", step_length=1,
                 start_top=True, start_right=True, start_left=True,
                 start_bottom=False, end_top=True, end_right=True,
                 end_left=True, end_bottom=False):
        """An instance of Wire3D describes the properties of a 3D nanowire.

        .. warning:: If keyword parameter `file_name` is set to
           anything else than "" all other parameters are ignored.
           It is only to facilitate the use of parameter `file_name`
           that the `base`, `wire_length` and `lead_length`
           parameters are optional.

        Parameters
        ----------
        base : int or float, optional
            Width of wire.
        wire_length : int or float, optional
            Length of complete wire including leads.
        lead_length : int or float, optional
            Length of lead-wire interface in direction of the wire.

        Other Parameters
        ----------------
        identifier : str, optional
            Identifies the wire represented in plots and data files
            produced by garn.
        step_length : int or float, optional
            Discretization step.
        start_top : bool, optional
            Whether there should be a lead on the top at the start of
            the wire.
        start_right : bool, optional
            Whether there should be a lead on the right side at the
            start of the wire.
        start_left : bool, optional
            Whether there should be a lead on the left side at the
            start of the wire.
        start_bottom : bool, optional
            Whether there should be a lead on the bottom at the start
            of the wire.
        end_top : bool, optional
            Whether there should be a lead on the top at the end of
            the wire.
        end_right : bool, optional
            Whether there should be a lead on the right side at the
            end of the wire.
        end_left : bool, optional
            Whether there should be a lead on the left side at the
            end of the wire.
        end_bottom : bool, optional
            Whether there should be a lead on the bottom at the end
            of the wire.
        file_name : str, optional
            Uses the data-file specified by the str to create the
            instance.
        """
        Wire.__init__(self, base=base, wire_length=wire_length,
                      lead_length=lead_length, identifier=identifier,
                      file_name=file_name, step_length=step_length,
                      start_top=start_top, start_right=start_right,
                      start_left=start_left,
                      start_bottom=start_bottom, end_top=end_top,
                      end_right=end_right, end_left=end_left,
                      end_bottom=end_bottom)
        # Build the lattice first; _make_system needs it to add sites.
        self.lattice = self._lattice()
        self._make_system()

    #---------------------------------------------------------------------
    # Internal functions
    #---------------------------------------------------------------------
    def _attach_leads(self, lead_start_top, lead_start_side, lead_end_top,
                      lead_end_side):
        """Attaches leads to system according to the self.leads list

        Parameters
        ----------
        lead_start_top : Builder_ with 1D translational symmetry in z-direction
            Builder of the lead which is to be attached on the top of
            the beginning.
        lead_start_side : Builder_ with 1D translational symmetry in x-direction
            Builder of the lead which is to be attached on the side of
            the beginning.
        lead_end_top : Builder_ with 1D translational symmetry in z-direction
            Builder of the lead which is to be attached on the top of
            the end.
        lead_end_side : Builder_ with 1D translational symmetry in x-direction
            Builder of the lead which is to be attached on the side of
            the end.

        .. _Builder:: http://kwant-project.org/doc/1.0/reference/generated/kwant.builder.Builder#kwant.builder.Builder
        """
        # self.leads appears to hold 8 booleans in the constructor's
        # order (start top/right/left/bottom, end top/right/left/bottom);
        # a reversed() builder is the mirror-image lead on the opposite
        # side of the wire.  TODO(review): confirm ordering against Wire.
        if self.leads[0]:
            self.sys.attach_lead(lead_start_top)
        if self.leads[1]:
            self.sys.attach_lead(lead_start_side)
        if self.leads[2]:
            self.sys.attach_lead(lead_start_side.reversed())
        if self.leads[3]:
            self.sys.attach_lead(lead_start_top.reversed())
        if self.leads[4]:
            self.sys.attach_lead(lead_end_top)
        if self.leads[5]:
            self.sys.attach_lead(lead_end_side)
        if self.leads[6]:
            self.sys.attach_lead(lead_end_side.reversed())
        if self.leads[7]:
            self.sys.attach_lead(lead_end_top.reversed())

    def _make_system(self):
        """Fills the Builder object with sites and hoppings.

        This is were the sites in the scattering region are added to
        the kwant.Builder object and functions to create leads and
        attach them are called. Welcome to the heart of
        :class:`garn.Wire3D`.
        """
        #add sites in scattering region
        self.sys[self.lattice.shape(
            self._hexagon_wire, (0, 0, 0))] = self._onsite
        self.sys[self.lattice.neighbors()] = - self.t
        # one lead pair translates along z (top/bottom leads), the other
        # along x (side leads)
        lead_start_top, lead_end_top = self._create_leads((0, 0, self.a))
        lead_start_side, lead_end_side = self._create_leads((self.a, 0, 0))
        self._attach_leads(lead_start_top, lead_start_side,
                           lead_end_top, lead_end_side)
        #self.system_plot()
        self.sys = self.sys.finalized()

    def _hexagon_wire(self, pos):
        """ Find out if the position is inside a hexagonal wire."""
        x, y, z = pos
        # NOTE: '&' is the bitwise operator, but all operands are plain
        # bools here, so it acts as a non-short-circuiting 'and'.
        if (hexagon((x, z), self.base) & (y >= 0) & (y < self.wire_length)):
            return True
        else:
            return False

    def _positions_of_leads(self):
        """Calculate positions from where to start fill leads

        Returns
        -------
        start_top_site: tuple of 3 floats
            Top left corner of rectangle enclosing the hexagon of the
            beginning of the wire.
        end_top_site: tuple of 3 floats
            Top left corner of rectangle enclosing the hexagon of the
            wire at the beginning of the lead at the end of the wire.

        Notes
        -----
        Explaining these positions is messy, so here is some math
        instead.

        .. math::
            start\\_top\\_site = (-\\dfrac{base}{2}, 0, \\dfrac{\\sqrt{3}base}{2})

            end\\_top\\_site = (-\\dfrac{base}{2}, wire\\_length - lead\\_length, \\dfrac{\\sqrt{3}base}{2})
        """
        # Snap both corner points to the closest actual lattice site.
        xs, ys, zs = self.lattice.closest(( - self.base / 2.0, 0,
                                           sqrt(3) / 2.0 *
                                           self.base))
        xe, ye, ze = self.lattice.closest( (- self.base / 2.0,
                                            self.wire_length -
                                            self.lead_length, sqrt(3)
                                            / 2.0 * self.base))
        start_top_site = (xs, ys, zs)
        end_top_site = (xe, ye, ze)
        return start_top_site, end_top_site

    def _lattice(self):
        # Set lattice vectors for lattice object: a simple cubic lattice
        # with lattice constant self.a.
        basis_vectors = ((self.a, 0, 0), (0, self.a, 0), (0, 0, self.a))
        # return the lattice object
        return kwant.lattice.Monatomic(basis_vectors)

    def _onsite(self, args):
        # Constant on-site energy of 6 * t; the commented-out term below
        # would add site-dependent disorder.
        # + kwant.digest.gauss(str(site.pos))
        return 6 * self.t

    def _fill_lead(self, lead, position, side=False):
        # Populate a lead Builder with its cross-section sites starting
        # from `position`; `side` selects the x-translated (side) versus
        # z-translated (top/bottom) cross section.
        x, y, z = position
        start_x = -self.base + 1  # NOTE(review): unused local — candidate for removal
        if not side:
            # top/bottom lead: a base-wide strip in the x-y plane at z=0
            lead[[self.lattice(i, j, 0) for i in range(-self.base+1,
                  self.base) for j in range(y, y +
                  self.lead_length)]] = 6 * self.t
            return lead
        if side:
            # side lead: a strip in the y-z plane at x=0, spanning the
            # hexagon height (base * sqrt(3) / 2 on each side)
            lead[[self.lattice(0, j, k) for j in
                  range(y, y + self.lead_length) for k in
                  range(int(-self.base * sqrt(3) / 2.0),
                        int(self.base * sqrt(3) / 2.0)+1)]] = 6 * self.t
            return lead

    def _create_leads(self, sym):
        """ Return lead at the start and end of wire with symmetry sym"""
        # a translational symmetry along x means the lead sits on the
        # side of the wire
        if (sym == (self.a, 0, 0)):
            side = True
        else:
            side = False
        lead_start = kwant.Builder(
            kwant.TranslationalSymmetry(sym))
        lead_end = kwant.Builder(
            kwant.TranslationalSymmetry(sym))
        pos_start, pos_end = self._positions_of_leads()
        lead_end = self._fill_lead(lead_end, pos_end, side)
        lead_start = self._fill_lead(lead_start, pos_start, side)
        lead_end[self.lattice.neighbors()] = -self.t
        lead_start[self.lattice.neighbors()] = -self.t
        return lead_start, lead_end
| mit |
SimplyKnownAsG/mpi-action-framework | tests/py/test_maf_comm.py | 1 | 1542 | from mpi4py.MPI import COMM_WORLD
import maf
@maf.test
def test_bcast_string(self):
    """Broadcast a string from rank 0 and check every rank receives it."""
    maf.log('here.')
    world = maf.MafComm.World
    maf.log('here..')
    # rank r builds a string of (r + 3) 'm's, so only rank 0 holds "mmm"
    val = 'm' * (world.rank + 3)
    maf.log('here...')
    if world.rank == 0:
        self.assert_equal("mmm", val)
    else:
        self.assert_not_equal("mmm", val)
    maf.log('here....')
    # after the broadcast, all ranks must hold rank 0's value
    val = world.bcast(val)
    maf.log('here.....')
    self.assert_equal("mmm", val)
@maf.test
def test_bcast_int(self):
    """Broadcast an int from rank 0 and check every rank receives it."""
    world = maf.MafComm.World
    value = (world.rank + 3) * 3
    # only rank 0 starts with the expected value (9); other ranks differ
    if world.rank == 0:
        self.assert_equal(9, value)
    else:
        self.assert_not_equal(9, value)
    # after the broadcast, all ranks must hold rank 0's value
    value = world.bcast(value)
    self.assert_equal(9, value)
@maf.test
def test_bcast_float(self):
    """Broadcast a float from rank 0 and check every rank receives it."""
    world = maf.MafComm.World
    val = (world.rank + 3.3) * 3
    # only rank 0 starts with ~9.9; other ranks differ by at least 3,
    # hence the looser 1e-2 tolerance for the "not close" check
    if (world.rank == 0):
        self.assert_close(9.9, val, 1e-5)
    else:
        self.assert_not_close(9.9, val, 1e-2)
    val = world.bcast(val)
    self.assert_close(9.9, val, 1e-5)
@maf.test
def test_bcast_double(self):
    """Broadcast a double from rank 0 and check every rank receives it.

    NOTE(review): byte-for-byte identical to test_bcast_float — Python
    floats are already doubles; presumably this mirrors a typed native
    test suite. Confirm whether both are needed.
    """
    world = maf.MafComm.World
    val = (world.rank + 3.3) * 3
    if (world.rank == 0):
        self.assert_close(9.9, val, 1e-5)
    else:
        self.assert_not_close(9.9, val, 1e-2)
    val = world.bcast(val)
    self.assert_close(9.9, val, 1e-5)
if __name__ == '__main__':
    # The real test controller is disabled: the run aborts at this
    # barrier because of a SWIG bug (see the linked issue).
    maf.barrier('ERROR: these tests fail due to an error in SWiG. See https://github.com/swig/swig/issues/773')
    # controller = maf.TestController()
    # controller.start()
| apache-2.0 |
jm-begon/scikit-learn | sklearn/decomposition/incremental_pca.py | 199 | 10508 | """Incremental Principal Components Analysis."""
# Author: Kyle Kastner <kastnerkyle@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _batch_mean_variance_update
class IncrementalPCA(_BasePCA):
    """Incremental principal components analysis (IPCA).

    Linear dimensionality reduction using Singular Value Decomposition of
    centered data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space.

    Depending on the size of the input data, this algorithm can be much more
    memory efficient than a PCA.

    This algorithm has constant memory complexity, on the order
    of ``batch_size``, enabling use of np.memmap files without loading the
    entire file into memory.

    The computational overhead of each SVD is
    ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
    remain in memory at a time. There will be ``n_samples / batch_size`` SVD
    computations to get the principal components, versus 1 large SVD of
    complexity ``O(n_samples * n_features ** 2)`` for PCA.

    Read more in the :ref:`User Guide <IncrementalPCA>`.

    Parameters
    ----------
    n_components : int or None, (default=None)
        Number of components to keep. If ``n_components`` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.

    batch_size : int or None, (default=None)
        The number of samples to use for each batch. Only used when calling
        ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
        is inferred from the data and set to ``5 * n_features``, to provide a
        balance between approximation accuracy and memory consumption.

    copy : bool, (default=True)
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.

    whiten : bool, optional
        When True (False by default) the ``components_`` vectors are divided
        by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
        with unit component-wise variances.

        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making data respect some hard-wired assumptions.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.

    explained_variance_ : array, shape (n_components,)
        Variance explained by each of the selected components.

    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If all components are stored, the sum of explained variances is equal
        to 1.0

    mean_ : array, shape (n_features,)
        Per-feature empirical mean, aggregate over calls to ``partial_fit``.

    var_ : array, shape (n_features,)
        Per-feature empirical variance, aggregate over calls to ``partial_fit``.

    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf.

    n_components_ : int
        The estimated number of components. Relevant when ``n_components=None``.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    Notes
    -----
    Implements the incremental PCA model from:
    `D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
    Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
    pp. 125-141, May 2008.`
    See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf

    This model is an extension of the Sequential Karhunen-Loeve Transform from:
    `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
    its Application to Images, IEEE Transactions on Image Processing, Volume 9,
    Number 8, pp. 1371-1374, August 2000.`
    See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf

    We have specifically abstained from an optimization used by authors of both
    papers, a QR decomposition used in specific situations to reduce the
    algorithmic complexity of the SVD. The source for this technique is
    `Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
    section 5.4.4, pp 252-253.`. This technique has been omitted because it is
    advantageous only when decomposing a matrix with ``n_samples`` (rows)
    >= 5/3 * ``n_features`` (columns), and hurts the readability of the
    implemented algorithm. This would be a good opportunity for future
    optimization, if it is deemed necessary.

    References
    ----------
    D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
    Tracking, International Journal of Computer Vision, Volume 77,
    Issue 1-3, pp. 125-141, May 2008.

    G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
    Section 5.4.4, pp. 252-253.

    See also
    --------
    PCA
    RandomizedPCA
    KernelPCA
    SparsePCA
    TruncatedSVD
    """

    def __init__(self, n_components=None, whiten=False, copy=True,
                 batch_size=None):
        self.n_components = n_components
        self.whiten = whiten
        self.copy = copy
        self.batch_size = batch_size

    def fit(self, X, y=None):
        """Fit the model with X, using minibatches of size batch_size.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        y: Passthrough for ``Pipeline`` compatibility.

        Returns
        -------
        self: object
            Returns the instance itself.
        """
        # Reset all fitted state so that repeated fit() calls start from
        # scratch (partial_fit, in contrast, accumulates across calls).
        self.components_ = None
        self.mean_ = None
        self.singular_values_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.noise_variance_ = None
        self.var_ = None
        self.n_samples_seen_ = 0
        X = check_array(X, dtype=np.float)
        n_samples, n_features = X.shape

        if self.batch_size is None:
            # heuristic default documented in the class docstring
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size

        # feed the data to partial_fit one minibatch at a time
        for batch in gen_batches(n_samples, self.batch_size_):
            self.partial_fit(X[batch])
        return self

    def partial_fit(self, X, y=None):
        """Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self: object
            Returns the instance itself.
        """
        X = check_array(X, copy=self.copy, dtype=np.float)
        n_samples, n_features = X.shape
        if not hasattr(self, 'components_'):
            self.components_ = None

        if self.n_components is None:
            self.n_components_ = n_features
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, need "
                             "more rows than columns for IncrementalPCA "
                             "processing" % (self.n_components, n_features))
        else:
            self.n_components_ = self.n_components

        # n_features must stay constant between partial_fit calls, since
        # the stored basis has shape (n_components_, n_features)
        if (self.components_ is not None) and (self.components_.shape[0]
                                               != self.n_components_):
            raise ValueError("Number of input features has changed from %i "
                             "to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value." % (
                                 self.components_.shape[0], self.n_components_))

        if self.components_ is None:
            # This is the first pass through partial_fit: a plain SVD of
            # the centered batch is the exact solution.
            self.n_samples_seen_ = 0
            col_var = X.var(axis=0)
            col_mean = X.mean(axis=0)
            X -= col_mean
            U, S, V = linalg.svd(X, full_matrices=False)
            # fix the sign convention so results are deterministic
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / n_samples
            explained_variance_ratio = S ** 2 / np.sum(col_var *
                                                       n_samples)
        else:
            col_batch_mean = X.mean(axis=0)
            # merge the running mean/variance with this batch's statistics
            col_mean, col_var, n_total_samples = _batch_mean_variance_update(
                X, self.mean_, self.var_, self.n_samples_seen_)
            X -= col_batch_mean
            # Build matrix of combined previous basis and new data; the
            # mean_correction row accounts for the shift between the old
            # running mean and this batch's mean (Ross et al., 2008).
            mean_correction = np.sqrt((self.n_samples_seen_ * n_samples) /
                                      n_total_samples) * (self.mean_ -
                                                          col_batch_mean)
            X_combined = np.vstack((self.singular_values_.reshape((-1, 1)) *
                                    self.components_, X,
                                    mean_correction))
            U, S, V = linalg.svd(X_combined, full_matrices=False)
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / n_total_samples
            explained_variance_ratio = S ** 2 / np.sum(col_var *
                                                       n_total_samples)
        self.n_samples_seen_ += n_samples
        # keep only the leading n_components_ directions
        self.components_ = V[:self.n_components_]
        self.singular_values_ = S[:self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[:self.n_components_]
        self.explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        if self.n_components_ < n_features:
            # mean variance of the discarded components (PPCA noise term)
            self.noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self.noise_variance_ = 0.
        return self
| bsd-3-clause |
paris-ci/CloudBot | cloudbot/util/timeformat.py | 2 | 6480 | """
timeformat.py
Contains functions to format time periods. Based on code from the Django project and CloudBot contributors.
The licensing for this module isn't solid, because I started working on this module before I had a proper
system for tracking code licences. If your code is in this file and you have any queries, contact me by
email at <lukeroge@gmail.com>!
Maintainer:
- Luke Rogers <https://github.com/lukeroge>
License:
BSD license
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import datetime
from cloudbot.util import formatting
def time_since(d, now=None, count=2, accuracy=6, simple=False):
    """
    Return a nicely formatted string (e.g. "10 minutes") describing the
    time between *d* and *now*. If *d* occurs after *now*, "0 minutes"
    is returned.

    Both *d* and *now* may be datetime.datetime, datetime.date, or
    int/float unix-epoch values.

    SIMPLE: displays the time in a compact format
        >> time_since(WHEN, simple=True)
        1h 2m 34s
    COUNT: how many periods should be shown (default 2)
        >> time_since(WHEN, count=6)
        147 years, 9 months, 7 weeks, 18 hours, 12 minutes and 34 seconds
    """
    # Unix-epoch numbers -> datetime.datetime for comparison.
    if isinstance(d, (int, float)):
        d = datetime.datetime.fromtimestamp(d)
    if isinstance(now, (int, float)):
        now = datetime.datetime.fromtimestamp(now)

    # datetime.date -> datetime.datetime (midnight) for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    if not now:
        now = datetime.datetime.now()

    # Drop the microsecond part of 'd' since 'now' effectively has none
    # after the subtraction below.
    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
    since = delta.days * 86400 + delta.seconds
    if since <= 0:
        # 'd' is in the future compared to 'now': nothing to format.
        return '0 minutes'

    # Delegate the actual string building to format_time.
    return format_time(since, count, accuracy, simple)


# compatibility
timesince = time_since
def time_until(d, now=None, count=2, accuracy=6, simple=False):
    """
    Like timesince, but returns a string measuring the time until
    the given time.
    """
    if not now:
        now = datetime.datetime.now()
    # measure "until" by swapping the argument order of time_since
    return time_since(now, d, count, accuracy, simple)


# compatibility
timeuntil = time_until
def format_time(seconds, count=3, accuracy=6, simple=False):
"""
Takes a length of time in seconds and returns a string describing that length of time.
This function has a number of optional arguments that can be combined:
SIMPLE: displays the time in a simple format
>> format_time(SECONDS)
1 hour, 2 minutes and 34 seconds
>> format_time(SECONDS, simple=True)
1h 2m 34s
COUNT: how many periods should be shown (default 3)
>> format_time(SECONDS)
147 years, 9 months and 8 weeks
>> format_time(SECONDS, count=6)
147 years, 9 months, 7 weeks, 18 hours, 12 minutes and 34 seconds
"""
if simple:
periods = [
('c', 60 * 60 * 24 * 365 * 100),
('de', 60 * 60 * 24 * 365 * 10),
('y', 60 * 60 * 24 * 365),
('mo', 60 * 60 * 24 * 30),
('d', 60 * 60 * 24),
('h', 60 * 60),
('m', 60),
('s', 1)
]
else:
periods = [
(('century', 'centuries'), 60 * 60 * 24 * 365 * 100),
(('decade', 'decades'), 60 * 60 * 24 * 365 * 10),
(('year', 'years'), 60 * 60 * 24 * 365),
(('month', 'months'), 60 * 60 * 24 * 30),
(('day', 'days'), 60 * 60 * 24),
(('hour', 'hours'), 60 * 60),
(('minute', 'minutes'), 60),
(('second', 'seconds'), 1)
]
periods = periods[-accuracy:]
strings = []
i = 0
for period_name, period_seconds in periods:
if i < count:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
i += 1
if simple:
strings.append("{}{}".format(period_value, period_name))
else:
if period_value == 1:
strings.append("{} {}".format(period_value, period_name[0]))
else:
strings.append("{} {}".format(period_value, period_name[1]))
else:
break
if simple:
return " ".join(strings)
else:
return formatting.get_text_list(strings, "and")
| gpl-3.0 |
LamCiuLoeng/jcp | ordering/util/bin2hex.py | 1 | 3516 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os
import traceback
# Bit-field widths of an EPC SGTIN-96 tag: 8-bit header, 3-bit filter,
# 3-bit partition, then company-prefix + item-reference (44 bits total,
# split per the partition table below) and a serial number.
HEADER_LEN = 8
FILTER_LEN = 3
PARTITION_LEN = 3
SERIAL_LEN = 38       # standard SGTIN-96 serial width
JCP_SERIAL_LEN = 35   # for jcp: 35-bit serial preceded by a fixed "110" prefix

# SGTIN-96 partition table: partition value -> [company-prefix bits,
# item-reference bits] (always summing to 44).
PARTITION_PREFIX_NBR = {
    0: [40, 4],
    1: [37, 7],
    2: [34, 10],
    3: [30, 14],
    4: [27, 17],
    5: [24, 20],
    6: [20, 24],
}
class convertpy:
    """Base-conversion helpers between binary, decimal and hex strings.

    All values are passed and returned as strings of digits; dec2bin can
    additionally zero-pad its result to a fixed width.
    """

    def __init__(self):
        # digit alphabet for bases up to 16: '0'-'9' followed by 'A'-'F'
        self.base = [str(x) for x in range(10)] + [chr(x) for x in range(ord('A'), ord('A') + 6)]

    def _dec2base(self, num, radix):
        """Render non-negative int *num* in *radix* using self.base digits.

        Returns '0' for 0 (the previous repeated divmod loop returned an
        empty string in that case).
        """
        digits = []
        while num:
            num, rem = divmod(num, radix)
            digits.append(self.base[rem])
        if not digits:
            return '0'
        return ''.join(reversed(digits))

    # 2 to 10
    def bin2dec(self, string_num):
        """Convert a binary digit string to a decimal digit string."""
        return str(int(string_num, 2))

    # 16 to 10
    def hex2dec(self, string_num):
        """Convert a hex digit string (any case) to a decimal digit string.

        Invalid input raises ValueError from int(); the old try/except
        that only re-raised has been removed as a no-op.
        """
        return str(int(string_num.upper(), 16))

    # 10 to 2
    def dec2bin(self, string_num, length=0):
        """Convert a decimal digit string to binary.

        When *length* > 0 the result is left-padded with zeros to that
        width (a result longer than *length* is returned unchanged).
        """
        value = self._dec2base(int(string_num), 2)
        if length == 0:
            return value
        rest_len = length - len(value)
        return '0' * rest_len + value

    # 10 to 16
    def dec2hex(self, string_num):
        """Convert a decimal digit string to an uppercase hex digit string."""
        return self._dec2base(int(string_num), 16)

    # 16 to 2
    def hex2bin(self, string_num):
        """Convert a hex digit string to a binary digit string."""
        return self.dec2bin(self.hex2dec(string_num.upper()))

    # 2 to 16
    def bin2hex(self, string_num):
        """Convert a binary digit string to an uppercase hex digit string."""
        return self.dec2hex(self.bin2dec(string_num))
class upcToepc:
    """Build EPC SGTIN-96-style hex identifiers from a UPC and a serial range."""

    def __init__(self):
        pass

    def run(self, begin, upc, qty, header=48, fitler=1, partition=5):
        """Return a list of *qty* EPC hex strings for serials begin..begin+qty-1.

        Parameters: *upc* is a 12-digit (UPC-A) or 13-digit code string;
        *header*=48 is the SGTIN-96 header value; *fitler* (sic — typo kept
        for interface compatibility) is the 3-bit filter value; *partition*
        selects the company-prefix/item-reference split from
        PARTITION_PREFIX_NBR. On any error the traceback is printed and
        None is returned (NOTE(review): the bare except hides failures).
        """
        try:
            obj = convertpy()
            rs = []
            for i in range(begin, qty + begin):
                # header (8 bits) + filter (3 bits) + partition (3 bits)
                string_num = obj.dec2bin(header, HEADER_LEN)
                string_num += obj.dec2bin(fitler, FILTER_LEN)
                string_num += obj.dec2bin(partition, PARTITION_LEN)
                # Split the UPC into company-prefix (upcA) and
                # item-reference (upcB) halves; a 12-digit UPC-A is
                # left-padded with '0', and the final check digit dropped.
                upcA = "0" + upc[:6] if len(upc) == 12 else upc[0:7]
                upcB = "0" + upc[6:-1] if len(upc) == 12 else upc[7:-1]
                string_num += obj.dec2bin(upcA, PARTITION_PREFIX_NBR[partition][0])
                string_num += obj.dec2bin(upcB, PARTITION_PREFIX_NBR[partition][1])
                # string_num += obj.dec2bin(i,SERIAL_LEN)
                # jcp variant: fixed "110" prefix plus a 35-bit serial
                string_num += "110" + obj.dec2bin(i, JCP_SERIAL_LEN)  # for jcp
                value = obj.bin2hex(string_num)
                rs.append(value)
            return rs
        except:
            traceback.print_exc()
if __name__ == "__main__":
    # Demo/manual test: emit one EPC for the sample UPC starting at
    # serial 6995. (Python 2 print statement; the triple-quoted block
    # below is old scratch code kept for reference.)
    try:
        obj = upcToepc()
        upc = ['075338723709']
        #8000000001
        for u in upc:
            data = obj.run(6995,u,1)
            print data
        #map(obj.run,upc*20,[i for i in range(0,20)])
        '''
        for i in range(0,20):
            obj.run(48,1,5,upc,i)
        string_num = '00110000 001 101 000000010011001110010110 00001011010001010101 00000001001001100101100000001011010010'
        obj = convertpy()
        string_num = string_num.replace(' ','')
        value = obj.bin2hex(string_num)
        print value
        #10 to 2
        string_num = '78742'
        value = obj.dec2bin(string_num,24)
        print value
        '''
    except:
        traceback.print_exc()
| mit |
hylje/tekis | tekis/members/backends.py | 1 | 2237 | from __future__ import unicode_literals
from django.contrib.auth.models import Group, User
from tekis.members.models import TekisMember
class TekisAuthBackend(object):
    """Backend that authenticates with the members database and creates a
    full Django user object if it succeeds
    """

    def authenticate(self, username=None, password=None):
        """Validate credentials against TekisMember and sync a Django User.

        Returns the synchronized User on success, or None when the member
        does not exist, the password is wrong, or the member is inactive.
        Group memberships and staff/superuser flags are refreshed on every
        successful login.
        """
        try:
            member = TekisMember.objects.get(username=username)
        except TekisMember.DoesNotExist:
            return None
        if not member.check_password(password):
            return None
        # create or update a Django user
        # Guard against an empty screen_name: "".split() is [], and
        # indexing it would raise IndexError and break login.
        name = member.screen_name.split()
        fn = name[0] if name else ""
        ln = name[-1] if name else ""
        user, created = User.objects.get_or_create(
            username=member.username,
            defaults={
                "username": member.username,
                "email": member.email,
                "first_name": fn,
                "last_name": ln,
                "date_joined": member.created,
            }
        )
        # always update permissions
        user.is_staff = member.role in ["virkailija",
                                        "jasenvirkailija",
                                        "yllapitaja"]
        user.is_superuser = member.role == "yllapitaja"
        user.is_active = member.membership not in ["ei-jasen", "erotettu"]
        user.save()
        # add and remove non-superuser permission group relationships
        # "yllapitaja" is a superuser so he doesn't need any groups
        virkailija, _ = Group.objects.get_or_create(name="virkailija")
        if member.role == "virkailija":
            user.groups.add(virkailija)
        else:
            user.groups.remove(virkailija)
        jasenvirkailija, _ = Group.objects.get_or_create(name="jasenvirkailija")
        if member.role == "jasenvirkailija":
            user.groups.add(jasenvirkailija)
        else:
            user.groups.remove(jasenvirkailija)
        # bail out inactive users here
        if not user.is_active:
            return None
        return user

    def get_user(self, user_id):
        """Return the User for *user_id*, or None (Django backend contract)."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
| bsd-3-clause |
davidmueller13/valexKernel-lt03wifi | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which were freed

# options (set from sys.argv by trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
def diff_msec(src, dst):
    """Return the interval from src to dst, converted from nsec to msec."""
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # honour the "dev=" option: skip packets belonging to other devices
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # columns: device, length, enqueue timestamp, Qdisc latency
    # (queue -> xmit), driver latency (xmit -> free)
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
         nsecs_secs(hunk['queue_t']),
         nsecs_nsecs(hunk['queue_t'])/1000,
         diff_msec(hunk['queue_t'], hunk['xmit_t']),
         diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing; the PF_JOINT strings draw
# the tree connectors between successive events of one receive hunk.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    # all timestamps below are printed relative to the first irq entry
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        # "dev=" option given: show only hunks with a matching irq name
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return

    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # first the hardware irq entries (and any netif_rx seen inside them)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
             irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                     irq_event['skbaddr'])
                print PF_JOINT
    # then the NET_RX softirq entry and the per-skb events inside it
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # close the tree with a blank line after the final event
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                 event['len'])
            if 'comm' in event.keys():
                # skb was copied to user space (skb_copy_datagram_iovec)
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                     event['pid'], event['comm'])
            elif 'handle' in event.keys():
                # skb was freed in the kernel (dropped or consumed)
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t, event['comm_t']),
                         event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t, event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse the script options (tx / rx / dev=NAME / debug) from sys.argv."""
    global show_tx
    global show_rx
    global dev
    global debug
    # sys.argv[0] is the script name; everything after it is an option
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # neither chart selected explicitly: show both
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# Below are the callbacks perf invokes when it finds a corresponding event.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    """Buffer a NET_RX softirq-entry event; other vectors are ignored."""
    # NOTE: all three softirq callbacks resolve the vector through the
    # 'irq__softirq_entry' symbol table (the vec symbols are shared)
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    """Buffer a NET_RX softirq-exit event; other vectors are ignored."""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    """Buffer a NET_RX softirq-raise event; other vectors are ignored."""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    """Buffer a hard-irq handler entry event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    """Buffer a hard-irq handler exit event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    """Buffer a napi_poll event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    """Buffer a netif_receive_skb event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    """Buffer a netif_rx event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    """Buffer a net_dev_queue (skb handed to a qdisc) event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    """Buffer a net_dev_xmit event (rc is the driver's return code)."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, rc, dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """Buffer a kfree_skb event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    """Buffer a consume_skb event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    """Buffer a skb_copy_datagram_iovec (copy-to-user) event."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    """Record a hard-irq entry: push a fresh record on this CPU's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic:
        irq_dic[cpu] = []
    irq_dic[cpu].append(
        {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time})
def handle_irq_handler_exit(event_info):
    """Close the per-CPU irq record opened by handle_irq_handler_entry.

    The record is kept only when a NET_RX softirq was raised inside the
    handler (i.e. it gained an 'event_list'); otherwise it is dropped.
    """
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    # guard against an exit with no matching entry (e.g. a trace that
    # starts in the middle of a handler) -- the bare pop() below would
    # raise IndexError; same check as handle_irq_softirq_raise
    if cpu not in irq_dic.keys() or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t': time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Append a 'sirq_raise' marker to the irq record open on this CPU."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() or len(irq_dic[cpu]) == 0:
        return
    # operate on the record in place instead of pop/update/re-append
    irq_record = irq_dic[cpu][-1]
    irq_record.setdefault('event_list', []).append(
        {'time': time, 'event': 'sirq_raise'})
def handle_irq_softirq_entry(event_info):
    # Open a fresh NET_RX context for this CPU; napi_poll/netif_receive_skb
    # events are collected into it until the matching softirq exit.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """Seal the NET_RX softirq on this CPU: pair the buffered hard-irq
    records with the softirq's event list and queue the merged "hunk"
    for later printing by print_receive()."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic:
        irq_list = irq_dic.pop(cpu)
    if cpu in net_rx_dic:
        net_rx = net_rx_dic.pop(cpu)
        sirq_ent_t = net_rx['sirq_ent_t']
        event_list = net_rx['event_list']
    # nothing useful was collected for this softirq -> drop it
    if irq_list == [] or event_list == 0:
        return
    # merge information related to one NET_RX softirq into a single record
    receive_hunk_list.append({'sirq_ent_t': sirq_ent_t, 'sirq_ext_t': time,
                              'irq_list': irq_list, 'event_list': event_list})
def handle_napi_poll(event_info):
    """Attach a napi_poll event to the NET_RX context open on this CPU."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    net_rx_dic[cpu]['event_list'].append(
        {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the irq record open on this CPU."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() or len(irq_dic[cpu]) == 0:
        return
    # operate on the record in place instead of pop/update/re-append
    irq_record = irq_dic[cpu][-1]
    irq_record.setdefault('event_list', []).append(
        {'time': time, 'event': 'netif_rx',
         'skbaddr': skbaddr, 'skblen': skblen, 'dev_name': dev_name})
def handle_netif_receive_skb(event_info):
    """Record a netif_receive_skb event in the current NET_RX context and
    in the bounded rx_skb_list used to match later free/copy events."""
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    rec_data = {'event_name': 'netif_receive_skb',
                'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
    net_rx_dic[cpu]['event_list'].append(rec_data)
    rx_skb_list.insert(0, rec_data)
    # keep the matching buffer bounded; count overflows for the debug report
    if len(rx_skb_list) > buffer_budget:
        rx_skb_list.pop()
        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record an skb entering a qdisc; keep tx_queue_list bounded."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful device xmit, move the matching skb from the qdisc
    buffer to the (bounded, overflow-counted) xmit buffer."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc != 0:  # only NETDEV_TX_OK (0) is of interest
        return
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            skb['xmit_t'] = time
            tx_xmit_list.insert(0, skb)
            del tx_queue_list[i]
            if len(tx_xmit_list) > buffer_budget:
                tx_xmit_list.pop()
                of_count_tx_xmit_list += 1
            return
def handle_kfree_skb(event_info):
    """Resolve a kfree_skb against the buffered tx/rx skbs.

    A queued-but-never-xmitted skb is simply dropped; an xmitted skb is
    promoted to tx_free_list with its free time; a received skb is tagged
    with the freeing task before being retired from rx_skb_list.
    """
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """A consumed (successfully delivered) tx skb gains its free time and
    moves from tx_xmit_list to tx_free_list."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """Mark a buffered rx skb as copied to user space and retire it."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i, rec_data in enumerate(rx_skb_list):
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
lz1988/company-site | tests/regressiontests/defaultfilters/tests.py | 13 | 30538 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import decimal
from django.template.defaultfilters import *
from django.test import TestCase
from django.utils import six
from django.utils import unittest, translation
from django.utils.safestring import SafeData
from django.utils.encoding import python_2_unicode_compatible
class DefaultFiltersTests(TestCase):
    def test_floatformat(self):
        """floatformat: default 1-digit rounding, explicit and negative
        precision, Decimal/str/None inputs, inf/nan, __float__ objects, and
        the decimal-context precision regression (#15789)."""
        self.assertEqual(floatformat(7.7), '7.7')
        self.assertEqual(floatformat(7.0), '7')
        self.assertEqual(floatformat(0.7), '0.7')
        self.assertEqual(floatformat(0.07), '0.1')
        self.assertEqual(floatformat(0.007), '0.0')
        self.assertEqual(floatformat(0.0), '0')
        self.assertEqual(floatformat(7.7, 3), '7.700')
        self.assertEqual(floatformat(6.000000, 3), '6.000')
        self.assertEqual(floatformat(6.200000, 3), '6.200')
        self.assertEqual(floatformat(6.200000, -3), '6.200')
        self.assertEqual(floatformat(13.1031, -3), '13.103')
        self.assertEqual(floatformat(11.1197, -2), '11.12')
        self.assertEqual(floatformat(11.0000, -2), '11')
        self.assertEqual(floatformat(11.000001, -2), '11.00')
        self.assertEqual(floatformat(8.2798, 3), '8.280')
        self.assertEqual(floatformat(5555.555, 2), '5555.56')
        self.assertEqual(floatformat(001.3000, 2), '1.30')
        self.assertEqual(floatformat(0.12345, 2), '0.12')
        self.assertEqual(floatformat(decimal.Decimal('555.555'), 2), '555.56')
        self.assertEqual(floatformat(decimal.Decimal('09.000')), '9')
        self.assertEqual(floatformat('foo'), '')
        self.assertEqual(floatformat(13.1031, 'bar'), '13.1031')
        self.assertEqual(floatformat(18.125, 2), '18.13')
        self.assertEqual(floatformat('foo', 'bar'), '')
        self.assertEqual(floatformat('¿Cómo esta usted?'), '')
        self.assertEqual(floatformat(None), '')
        # Check that we're not converting to scientific notation.
        self.assertEqual(floatformat(0, 6), '0.000000')
        self.assertEqual(floatformat(0, 7), '0.0000000')
        self.assertEqual(floatformat(0, 10), '0.0000000000')
        self.assertEqual(floatformat(0.000000000000000000015, 20),
                                     '0.00000000000000000002')
        pos_inf = float(1e30000)
        self.assertEqual(floatformat(pos_inf), six.text_type(pos_inf))
        neg_inf = float(-1e30000)
        self.assertEqual(floatformat(neg_inf), six.text_type(neg_inf))
        nan = pos_inf / pos_inf
        self.assertEqual(floatformat(nan), six.text_type(nan))
        class FloatWrapper(object):
            def __init__(self, value):
                self.value = value
            def __float__(self):
                return self.value
        self.assertEqual(floatformat(FloatWrapper(11.000001), -2), '11.00')
        # Regression for #15789
        decimal_ctx = decimal.getcontext()
        old_prec, decimal_ctx.prec = decimal_ctx.prec, 2
        try:
            self.assertEqual(floatformat(1.2345, 2), '1.23')
            self.assertEqual(floatformat(15.2042, -3), '15.204')
            self.assertEqual(floatformat(1.2345, '2'), '1.23')
            self.assertEqual(floatformat(15.2042, '-3'), '15.204')
            self.assertEqual(floatformat(decimal.Decimal('1.2345'), 2), '1.23')
            self.assertEqual(floatformat(decimal.Decimal('15.2042'), -3), '15.204')
        finally:
            decimal_ctx.prec = old_prec
    def test_floatformat_py2_fail(self):
        """floatformat at 16-digit precision; known to fail under Python 2
        float handling (marked expectedFailure below)."""
        self.assertEqual(floatformat(1.00000000000000015, 16), '1.0000000000000002')

    # The test above fails because of Python 2's float handling. Floats with
    # many zeroes after the decimal point should be passed in as another type
    # such as unicode or Decimal.
    if not six.PY3:
        test_floatformat_py2_fail = unittest.expectedFailure(test_floatformat_py2_fail)
def test_addslashes(self):
self.assertEqual(addslashes('"double quotes" and \'single quotes\''),
'\\"double quotes\\" and \\\'single quotes\\\'')
self.assertEqual(addslashes(r'\ : backslashes, too'),
'\\\\ : backslashes, too')
def test_capfirst(self):
self.assertEqual(capfirst('hello world'), 'Hello world')
    def test_escapejs(self):
        """escapejs \\uXXXX-escapes quotes, backslashes, control characters,
        angle brackets and the JS line/paragraph separators."""
        self.assertEqual(escapejs_filter('"double quotes" and \'single quotes\''),
            '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')
        self.assertEqual(escapejs_filter(r'\ : backslashes, too'),
            '\\u005C : backslashes, too')
        self.assertEqual(escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),
            'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')
        self.assertEqual(escapejs_filter(r'<script>and this</script>'),
            '\\u003Cscript\\u003Eand this\\u003C/script\\u003E')
        self.assertEqual(
            escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),
            'paragraph separator:\\u2029and line separator:\\u2028')
    def test_fix_ampersands(self):
        """fix_ampersands entity-encodes bare ampersands."""
        # NOTE(review): the expected value is identical to the input here;
        # upstream Django expects 'Jack &amp; Jill &amp; Jeroboam', so the
        # HTML entities appear to have been decoded in this copy -- verify.
        self.assertEqual(fix_ampersands_filter('Jack & Jill & Jeroboam'),
                         'Jack & Jill & Jeroboam')
    def test_linenumbers(self):
        """linenumbers prefixes each line, zero-padding to the line-count width."""
        self.assertEqual(linenumbers('line 1\nline 2'),
                          '1. line 1\n2. line 2')
        self.assertEqual(linenumbers('\n'.join(['x'] * 10)),
                          '01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '\
                          'x\n08. x\n09. x\n10. x')
def test_lower(self):
self.assertEqual(lower('TEST'), 'test')
# uppercase E umlaut
self.assertEqual(lower('\xcb'), '\xeb')
def test_make_list(self):
self.assertEqual(make_list('abc'), ['a', 'b', 'c'])
self.assertEqual(make_list(1234), ['1', '2', '3', '4'])
    def test_slugify(self):
        """slugify lower-cases, strips punctuation, hyphenates whitespace and
        ASCII-folds accented characters."""
        self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'\
            ' silly characters ?%.$!/'),
            'jack-jill-like-numbers-123-and-4-and-silly-characters')
        self.assertEqual(slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
                         'un-elephant-a-loree-du-bois')
def test_stringformat(self):
self.assertEqual(stringformat(1, '03d'), '001')
self.assertEqual(stringformat(1, 'z'), '')
def test_title(self):
self.assertEqual(title('a nice title, isn\'t it?'),
"A Nice Title, Isn't It?")
self.assertEqual(title('discoth\xe8que'), 'Discoth\xe8que')
    def test_truncatewords(self):
        """truncatewords keeps the first N words (adding '...'); an oversized
        or non-numeric limit returns the text unchanged."""
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 1), 'A ...')
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 5),
            'A sentence with a few ...')
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 100),
            'A sentence with a few words in it')
        self.assertEqual(
            truncatewords('A sentence with a few words in it',
            'not a number'), 'A sentence with a few words in it')
    def test_truncatewords_html(self):
        """truncatewords_html truncates by word count while closing any HTML
        tags left open at the cut point."""
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 0), '')
        self.assertEqual(truncatewords_html('<p>one <a href="#">two - '\
            'three <br>four</a> five</p>', 2),
            '<p>one <a href="#">two ...</a></p>')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 4),
            '<p>one <a href="#">two - three <br>four ...</a></p>')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 5),
            '<p>one <a href="#">two - three <br>four</a> five</p>')
        self.assertEqual(truncatewords_html(
            '<p>one <a href="#">two - three <br>four</a> five</p>', 100),
            '<p>one <a href="#">two - three <br>four</a> five</p>')
        self.assertEqual(truncatewords_html(
            '\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m ...')
def test_upper(self):
self.assertEqual(upper('Mixed case input'), 'MIXED CASE INPUT')
# lowercase e umlaut
self.assertEqual(upper('\xeb'), '\xcb')
def test_urlencode(self):
self.assertEqual(urlencode('fran\xe7ois & jill'),
'fran%C3%A7ois%20%26%20jill')
self.assertEqual(urlencode(1), '1')
    def test_iriencode(self):
        """iriencode percent-encodes non-ASCII but leaves existing
        percent-escapes from urlencode untouched."""
        self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'),
                          'S%C3%B8r-Tr%C3%B8ndelag')
        self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')),
                          'fran%C3%A7ois%20%26%20jill')
    def test_urlizetrunc(self):
        """urlizetrunc linkifies URLs, truncating the visible text to the
        given length (with '...'); the href keeps the full URL."""
        # NOTE(review): upstream Django entity-encodes '&' in these expected
        # strings; this copy shows them raw -- verify before relying on them.
        self.assertEqual(urlizetrunc('http://short.com/', 20), '<a href='\
            '"http://short.com/" rel="nofollow">http://short.com/</a>')
        self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
            '&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'\
            'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'\
            'meta=" rel="nofollow">http://www.google...</a>')
        self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
            '&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'\
            'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'\
            '&meta=" rel="nofollow">http://www.google...</a>')
        # Check truncating of URIs which are the exact length
        uri = 'http://31characteruri.com/test/'
        self.assertEqual(len(uri), 31)
        self.assertEqual(urlizetrunc(uri, 31),
            '<a href="http://31characteruri.com/test/" rel="nofollow">'\
            'http://31characteruri.com/test/</a>')
        self.assertEqual(urlizetrunc(uri, 30),
            '<a href="http://31characteruri.com/test/" rel="nofollow">'\
            'http://31characteruri.com/t...</a>')
        self.assertEqual(urlizetrunc(uri, 2),
            '<a href="http://31characteruri.com/test/"'\
            ' rel="nofollow">...</a>')
    def test_urlize(self):
        """urlize: plain/https/IDN/pre-quoted URLs, mailto links, nofollow,
        balanced parentheses, malformed URIs and trailing punctuation
        (regression tickets referenced inline)."""
        # Check normal urlize
        self.assertEqual(urlize('http://google.com'),
            '<a href="http://google.com" rel="nofollow">http://google.com</a>')
        self.assertEqual(urlize('http://google.com/'),
            '<a href="http://google.com/" rel="nofollow">http://google.com/</a>')
        self.assertEqual(urlize('www.google.com'),
            '<a href="http://www.google.com" rel="nofollow">www.google.com</a>')
        self.assertEqual(urlize('djangoproject.org'),
            '<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')
        self.assertEqual(urlize('info@djangoproject.org'),
            '<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>')
        # Check urlize with https addresses
        self.assertEqual(urlize('https://google.com'),
            '<a href="https://google.com" rel="nofollow">https://google.com</a>')
        # Check urlize doesn't overquote already quoted urls - see #9655
        self.assertEqual(urlize('http://hi.baidu.com/%D6%D8%D0%C2%BF'),
            '<a href="http://hi.baidu.com/%D6%D8%D0%C2%BF" rel="nofollow">'
            'http://hi.baidu.com/%D6%D8%D0%C2%BF</a>')
        self.assertEqual(urlize('www.mystore.com/30%OffCoupons!'),
            '<a href="http://www.mystore.com/30%25OffCoupons!" rel="nofollow">'
            'www.mystore.com/30%OffCoupons!</a>')
        self.assertEqual(urlize('http://en.wikipedia.org/wiki/Caf%C3%A9'),
            '<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Caf%C3%A9</a>')
        self.assertEqual(urlize('http://en.wikipedia.org/wiki/Café'),
            '<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Café</a>')
        # Check urlize keeps balanced parentheses - see #11911
        self.assertEqual(urlize('http://en.wikipedia.org/wiki/Django_(web_framework)'),
            '<a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Django_(web_framework)</a>')
        self.assertEqual(urlize('(see http://en.wikipedia.org/wiki/Django_(web_framework))'),
            '(see <a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
            'http://en.wikipedia.org/wiki/Django_(web_framework)</a>)')
        # Check urlize adds nofollow properly - see #12183
        self.assertEqual(urlize('foo@bar.com or www.bar.com'),
            '<a href="mailto:foo@bar.com">foo@bar.com</a> or '
            '<a href="http://www.bar.com" rel="nofollow">www.bar.com</a>')
        # Check urlize handles IDN correctly - see #13704
        self.assertEqual(urlize('http://c✶.ws'),
            '<a href="http://xn--c-lgq.ws" rel="nofollow">http://c✶.ws</a>')
        self.assertEqual(urlize('www.c✶.ws'),
            '<a href="http://www.xn--c-lgq.ws" rel="nofollow">www.c✶.ws</a>')
        self.assertEqual(urlize('c✶.org'),
            '<a href="http://xn--c-lgq.org" rel="nofollow">c✶.org</a>')
        self.assertEqual(urlize('info@c✶.org'),
            '<a href="mailto:info@xn--c-lgq.org">info@c✶.org</a>')
        # Check urlize doesn't highlight malformed URIs - see #16395
        self.assertEqual(urlize('http:///www.google.com'),
            'http:///www.google.com')
        self.assertEqual(urlize('http://.google.com'),
            'http://.google.com')
        self.assertEqual(urlize('http://@foo.com'),
            'http://@foo.com')
        # Check urlize accepts more TLDs - see #16656
        self.assertEqual(urlize('usa.gov'),
            '<a href="http://usa.gov" rel="nofollow">usa.gov</a>')
        # Check urlize don't crash on invalid email with dot-starting domain - see #17592
        self.assertEqual(urlize('email@.stream.ru'),
            'email@.stream.ru')
        # Check urlize accepts uppercased URL schemes - see #18071
        self.assertEqual(urlize('HTTPS://github.com/'),
            '<a href="https://github.com/" rel="nofollow">HTTPS://github.com/</a>')
        # Check urlize trims trailing period when followed by parenthesis - see #18644
        self.assertEqual(urlize('(Go to http://www.example.com/foo.)'),
            '(Go to <a href="http://www.example.com/foo" rel="nofollow">http://www.example.com/foo</a>.)')
        # Check urlize doesn't crash when square bracket is appended to url (#19070)
        self.assertEqual(urlize('[see www.example.com]'),
            '[see <a href="http://www.example.com" rel="nofollow">www.example.com</a>]' )
    def test_wordcount(self):
        """wordcount counts whitespace-separated words."""
        self.assertEqual(wordcount(''), 0)
        self.assertEqual(wordcount('oneword'), 1)
        self.assertEqual(wordcount('lots of words'), 3)

        # NOTE(review): the assertions below exercise wordwrap, not
        # wordcount -- consider moving them to a dedicated test.
        self.assertEqual(wordwrap('this is a long paragraph of text that '\
            'really needs to be wrapped I\'m afraid', 14),
            "this is a long\nparagraph of\ntext that\nreally needs\nto be "\
            "wrapped\nI'm afraid")
        self.assertEqual(wordwrap('this is a short paragraph of text.\n  '\
            'But this line should be indented', 14),
            'this is a\nshort\nparagraph of\ntext.\n  But this\nline '\
            'should be\nindented')
        self.assertEqual(wordwrap('this is a short paragraph of text.\n  '\
            'But this line should be indented',15), 'this is a short\n'\
            'paragraph of\ntext.\n  But this line\nshould be\nindented')
def test_rjust(self):
self.assertEqual(ljust('test', 10), 'test ')
self.assertEqual(ljust('test', 3), 'test')
self.assertEqual(rjust('test', 10), ' test')
self.assertEqual(rjust('test', 3), 'test')
def test_center(self):
self.assertEqual(center('test', 6), ' test ')
def test_cut(self):
self.assertEqual(cut('a string to be mangled', 'a'),
' string to be mngled')
self.assertEqual(cut('a string to be mangled', 'ng'),
'a stri to be maled')
self.assertEqual(cut('a string to be mangled', 'strings'),
'a string to be mangled')
    def test_force_escape(self):
        """force_escape HTML-escapes its input and returns SafeData."""
        # NOTE(review): these expected strings contain raw '<', '&' and '>';
        # upstream Django expects the escaped entities (&lt;, &amp;, &gt;),
        # so this copy looks entity-decoded -- verify against the original.
        escaped = force_escape('<some html & special characters > here')
        self.assertEqual(
            escaped, '<some html & special characters > here')
        self.assertTrue(isinstance(escaped, SafeData))
        self.assertEqual(
            force_escape('<some html & special characters > here ĐÅ€£'),
            '<some html & special characters > here'\
            ' \u0110\xc5\u20ac\xa3')
    def test_linebreaks(self):
        """linebreaks_filter wraps text in <p> and turns \\n, \\r and \\r\\n
        into <br />."""
        self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>')
        self.assertEqual(linebreaks_filter('line 1\nline 2'),
            '<p>line 1<br />line 2</p>')
        self.assertEqual(linebreaks_filter('line 1\rline 2'),
            '<p>line 1<br />line 2</p>')
        self.assertEqual(linebreaks_filter('line 1\r\nline 2'),
            '<p>line 1<br />line 2</p>')
def test_linebreaksbr(self):
self.assertEqual(linebreaksbr('line 1\nline 2'),
'line 1<br />line 2')
self.assertEqual(linebreaksbr('line 1\rline 2'),
'line 1<br />line 2')
self.assertEqual(linebreaksbr('line 1\r\nline 2'),
'line 1<br />line 2')
    def test_removetags(self):
        """removetags strips only the listed tags; striptags strips them all
        (tag contents are kept in both cases)."""
        self.assertEqual(removetags('some <b>html</b> with <script>alert'\
            '("You smell")</script> disallowed <img /> tags', 'script img'),
            'some <b>html</b> with alert("You smell") disallowed  tags')
        self.assertEqual(striptags('some <b>html</b> with <script>alert'\
            '("You smell")</script> disallowed <img /> tags'),
            'some html with alert("You smell") disallowed  tags')
    def test_dictsort(self):
        """dictsort orders a list of dicts by the given key; non-dict input
        fails silently with ''."""
        sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
                                 {'age': 63, 'name': 'Ra Ra Rasputin'},
                                 {'name': 'Jonny B Goode', 'age': 18}], 'age')
        self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
            [[('age', 18), ('name', 'Jonny B Goode')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 63), ('name', 'Ra Ra Rasputin')]])
        # If it gets passed a list of something else different from
        # dictionaries it should fail silently
        self.assertEqual(dictsort([1, 2, 3], 'age'), '')
        self.assertEqual(dictsort('Hello!', 'age'), '')
        self.assertEqual(dictsort({'a': 1}, 'age'), '')
        self.assertEqual(dictsort(1, 'age'), '')
    def test_dictsortreversed(self):
        """dictsortreversed orders a list of dicts by key, descending;
        non-dict input fails silently with ''."""
        sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
                                         {'age': 63, 'name': 'Ra Ra Rasputin'},
                                         {'name': 'Jonny B Goode', 'age': 18}],
                                        'age')
        self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
            [[('age', 63), ('name', 'Ra Ra Rasputin')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 18), ('name', 'Jonny B Goode')]])
        # If it gets passed a list of something else different from
        # dictionaries it should fail silently
        self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
        self.assertEqual(dictsortreversed('Hello!', 'age'), '')
        self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
        self.assertEqual(dictsortreversed(1, 'age'), '')
def test_first(self):
self.assertEqual(first([0,1,2]), 0)
self.assertEqual(first(''), '')
self.assertEqual(first('test'), 't')
def test_join(self):
self.assertEqual(join([0,1,2], 'glue'), '0glue1glue2')
def test_length(self):
self.assertEqual(length('1234'), 4)
self.assertEqual(length([1,2,3,4]), 4)
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is('a', 10), False)
def test_slice(self):
self.assertEqual(slice_filter('abcdefg', '0'), '')
self.assertEqual(slice_filter('abcdefg', '1'), 'a')
self.assertEqual(slice_filter('abcdefg', '-1'), 'abcdef')
self.assertEqual(slice_filter('abcdefg', '1:2'), 'b')
self.assertEqual(slice_filter('abcdefg', '1:3'), 'bc')
self.assertEqual(slice_filter('abcdefg', '0::2'), 'aceg')
    def test_unordered_list(self):
        """unordered_list renders nested lists as <li>/<ul> markup, stringifies
        arbitrary objects, and still accepts the legacy [item, [children]]
        pair format."""
        self.assertEqual(unordered_list(['item 1', 'item 2']),
            '\t<li>item 1</li>\n\t<li>item 2</li>')
        self.assertEqual(unordered_list(['item 1', ['item 1.1']]),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
        self.assertEqual(
            unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'\
            '</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')
        self.assertEqual(
            unordered_list(['item 1', ['item 1.1', ['item 1.1.1',
                                                    ['item 1.1.1.1']]]]),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'\
            'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'\
            '</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')
        self.assertEqual(unordered_list(
            ['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
            '\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'\
            'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'\
            '\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')

        @python_2_unicode_compatible
        class ULItem(object):
            def __init__(self, title):
                self.title = title
            def __str__(self):
                return 'ulitem-%s' % str(self.title)

        a = ULItem('a')
        b = ULItem('b')
        self.assertEqual(unordered_list([a,b]),
            '\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')

        # Old format for unordered lists should still work
        self.assertEqual(unordered_list(['item 1', []]), '\t<li>item 1</li>')
        self.assertEqual(unordered_list(['item 1', [['item 1.1', []]]]),
            '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
        self.assertEqual(unordered_list(['item 1', [['item 1.1', []],
            ['item 1.2', []]]]), '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'\
            '</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')
        self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',
            []], ['Topeka', []]]], ['Illinois', []]]]), '\t<li>States\n\t'\
            '<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'\
            '\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'\
            'Illinois</li>\n\t</ul>\n\t</li>')
def test_add(self):
self.assertEqual(add('1', '2'), 3)
def test_get_digit(self):
self.assertEqual(get_digit(123, 1), 3)
self.assertEqual(get_digit(123, 2), 2)
self.assertEqual(get_digit(123, 3), 1)
self.assertEqual(get_digit(123, 4), 0)
self.assertEqual(get_digit(123, 0), 123)
self.assertEqual(get_digit('xyz', 0), 'xyz')
    def test_date(self):
        """date formats datetimes with Django's format characters (escaping
        included); exhaustive coverage lives in dateformat.py."""
        # real testing of date() is in dateformat.py
        self.assertEqual(date(datetime.datetime(2005, 12, 29), "d F Y"),
            '29 December 2005')
        self.assertEqual(date(datetime.datetime(2005, 12, 29), r'jS \o\f F'),
            '29th of December')
def test_time(self):
# real testing of time() is done in dateformat.py
self.assertEqual(time(datetime.time(13), "h"), '01')
self.assertEqual(time(datetime.time(0), "h"), '12')
    def test_timesince(self):
        """timesince_filter renders the elapsed time, optionally against an
        explicit reference date; full coverage lives in timesince.py."""
        # real testing is done in timesince.py, where we can provide our own 'now'
        self.assertEqual(
            timesince_filter(datetime.datetime.now() - datetime.timedelta(1)),
            '1 day')

        self.assertEqual(
            timesince_filter(datetime.datetime(2005, 12, 29),
                             datetime.datetime(2005, 12, 30)),
            '1 day')
    def test_timeuntil(self):
        """timeuntil_filter renders the remaining time, optionally against an
        explicit reference date."""
        self.assertEqual(
            timeuntil_filter(datetime.datetime.now() + datetime.timedelta(1, 1)),
            '1 day')

        self.assertEqual(
            timeuntil_filter(datetime.datetime(2005, 12, 30),
                             datetime.datetime(2005, 12, 29)),
            '1 day')
def test_default(self):
self.assertEqual(default("val", "default"), 'val')
self.assertEqual(default(None, "default"), 'default')
self.assertEqual(default('', "default"), 'default')
def test_if_none(self):
self.assertEqual(default_if_none("val", "default"), 'val')
self.assertEqual(default_if_none(None, "default"), 'default')
self.assertEqual(default_if_none('', "default"), '')
def test_divisibleby(self):
self.assertEqual(divisibleby(4, 2), True)
self.assertEqual(divisibleby(4, 3), False)
    def test_yesno(self):
        """yesno maps True/False/None onto a comma-separated word list; with
        only two words, None falls back to the 'no' word, and a surplus word
        list yields ''."""
        self.assertEqual(yesno(True), 'yes')
        self.assertEqual(yesno(False), 'no')
        self.assertEqual(yesno(None), 'maybe')
        self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'),
                         'certainly')
        self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'),
                         'get out of town')
        self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'),
                         'perhaps')
        self.assertEqual(yesno(None, 'certainly,get out of town'),
                         'get out of town')
    def test_filesizeformat(self):
        """filesizeformat renders byte counts in human units up to PB;
        non-numeric input degrades to '0 bytes'."""
        self.assertEqual(filesizeformat(1023), '1023 bytes')
        self.assertEqual(filesizeformat(1024), '1.0 KB')
        self.assertEqual(filesizeformat(10*1024), '10.0 KB')
        self.assertEqual(filesizeformat(1024*1024-1), '1024.0 KB')
        self.assertEqual(filesizeformat(1024*1024), '1.0 MB')
        self.assertEqual(filesizeformat(1024*1024*50), '50.0 MB')
        self.assertEqual(filesizeformat(1024*1024*1024-1), '1024.0 MB')
        self.assertEqual(filesizeformat(1024*1024*1024), '1.0 GB')
        self.assertEqual(filesizeformat(1024*1024*1024*1024), '1.0 TB')
        self.assertEqual(filesizeformat(1024*1024*1024*1024*1024), '1.0 PB')
        self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
                         '2000.0 PB')
        self.assertEqual(filesizeformat(complex(1,-1)), '0 bytes')
        self.assertEqual(filesizeformat(""), '0 bytes')
        self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),
                         '0 bytes')
def test_localized_filesizeformat(self):
    """Under a German locale the decimal separator becomes a comma and
    'bytes' is capitalized."""
    KB = 1024
    cases = [
        (KB - 1, '1023 Bytes'),
        (KB, '1,0 KB'),
        (10 * KB, '10,0 KB'),
        (KB ** 2 - 1, '1024,0 KB'),
        (KB ** 2, '1,0 MB'),
        (50 * KB ** 2, '50,0 MB'),
        (KB ** 3 - 1, '1024,0 MB'),
        (KB ** 3, '1,0 GB'),
        (KB ** 4, '1,0 TB'),
        (KB ** 5, '1,0 PB'),
        (2000 * KB ** 5, '2000,0 PB'),
        (complex(1, -1), '0 Bytes'),
        ("", '0 Bytes'),
        ("\N{GREEK SMALL LETTER ALPHA}", '0 Bytes'),
    ]
    with self.settings(USE_L10N=True):
        with translation.override('de', deactivate=True):
            for value, expected in cases:
                self.assertEqual(filesizeformat(value), expected)
def test_pluralize(self):
    """``pluralize`` picks a suffix from the count (numbers or sequences)."""
    # Default suffix: '' for one, 's' otherwise; lists use their length.
    for value, expected in [(1, ''), (0, 's'), (2, 's'),
                            ([1], ''), ([], 's'), ([1, 2, 3], 's')]:
        self.assertEqual(pluralize(value), expected)
    # Single custom suffix.
    for value, expected in [(1, ''), (0, 'es'), (2, 'es')]:
        self.assertEqual(pluralize(value, 'es'), expected)
    # Singular,plural pair.
    for value, expected in [(1, 'y'), (0, 'ies'), (2, 'ies')]:
        self.assertEqual(pluralize(value, 'y,ies'), expected)
    # More than two comma-separated suffixes is invalid -> empty string.
    self.assertEqual(pluralize(0, 'y,ies,error'), '')
def test_phone2numeric(self):
    """Letters in a vanity phone number are translated to keypad digits."""
    self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
def test_non_string_input(self):
    """Filters coerce non-string input to text instead of raising."""
    cases = [
        (addslashes, (123,), '123'),
        (linenumbers, (123,), '1. 123'),
        (lower, (123,), '123'),
        (make_list, (123,), ['1', '2', '3']),
        (slugify, (123,), '123'),
        (title, (123,), '123'),
        (truncatewords, (123, 2), '123'),
        (upper, (123,), '123'),
        (urlencode, (123,), '123'),
        (urlize, (123,), '123'),
        (urlizetrunc, (123, 1), '123'),
        (wordcount, (123,), 1),
        (wordwrap, (123, 2), '123'),
        (ljust, ('123', 4), '123 '),
        (rjust, ('123', 4), ' 123'),
        (center, ('123', 5), ' 123 '),
        (center, ('123', 6), ' 123  '),
        (cut, (123, '2'), '13'),
        (escape, (123,), '123'),
        (linebreaks_filter, (123,), '<p>123</p>'),
        (linebreaksbr, (123,), '123'),
        (removetags, (123, 'a'), '123'),
        (striptags, (123,), '123'),
    ]
    for filter_func, args, expected in cases:
        self.assertEqual(filter_func(*args), expected)
| bsd-3-clause |
Donkyhotay/MoonPy | twisted/internet/defer.py | 1 | 38383 | # -*- test-case-name: twisted.test.test_defer -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for results that aren't immediately available.
Maintainer: Glyph Lefkowitz
"""
import traceback
import warnings
# Twisted imports
from twisted.python import log, failure, lockfile
from twisted.python.util import unsignedID, mergeFunctionMetadata
class AlreadyCalledError(Exception):
    """Raised when C{callback} or C{errback} is invoked on a L{Deferred}
    that has already been fired."""
    pass
class TimeoutError(Exception):
    """Raised when a timed operation (e.g. the default handler installed by
    the deprecated L{Deferred.setTimeout}, or L{DeferredFilesystemLock})
    runs out of time before completing."""
    pass
def logError(err):
    """Log C{err} via L{twisted.python.log.err} and return it unchanged, so
    this can be inserted directly into an errback chain without consuming
    the failure."""
    log.err(err)
    return err
def succeed(result):
    """
    Return a L{Deferred} that has already been fired with C{result}.

    Useful when synchronous code must satisfy an interface that expects a
    Deferred: simply return C{defer.succeed(theResult)} instead of doing
    anything asynchronous.  See L{fail} for the failing counterpart.

    @param result: The value to pass to the Deferred's C{callback} method.
    @rtype: L{Deferred}
    """
    deferred = Deferred()
    deferred.callback(result)
    return deferred
def fail(result=None):
    """
    Return a L{Deferred} whose errback has already been invoked with
    C{result}.  See L{succeed}'s docstring for rationale.

    @param result: The same argument that L{Deferred.errback} takes;
        C{None} captures the current exception state.
    @raise NoCurrentExceptionError: If C{result} is C{None} but there is no
        current exception state.
    @rtype: L{Deferred}
    """
    deferred = Deferred()
    deferred.errback(result)
    return deferred
def execute(callable, *args, **kw):
    """Create a deferred from a callable and arguments.

    Invoke C{callable(*args, **kw)} and wrap the outcome in a Deferred: a
    normal return value fires the callback chain, while a raised exception
    is captured as a Failure and fires the errback chain.
    """
    try:
        value = callable(*args, **kw)
    except:
        # fail() with no argument wraps the exception currently being handled.
        return fail()
    return succeed(value)
def maybeDeferred(f, *args, **kw):
    """Invoke a function that may or may not return a deferred.

    Call C{f(*args, **kw)} and normalise the outcome to a C{Deferred}: a
    returned C{Deferred} is passed through untouched; a returned C{Failure}
    becomes a failed Deferred; any other value becomes an already-fired
    Deferred; a raised exception is captured as a C{Failure} and becomes a
    failed Deferred.

    @type f: Any callable
    @param f: The callable to invoke
    @param args: The arguments to pass to C{f}
    @param kw: The keyword arguments to pass to C{f}

    @rtype: C{Deferred}
    @return: The result of the function call, wrapped in a C{Deferred} if
        necessary.
    """
    try:
        outcome = f(*args, **kw)
    except:
        return fail(failure.Failure())

    # A value can be at most one of Failure / Deferred, so order is free here.
    if isinstance(outcome, failure.Failure):
        return fail(outcome)
    if isinstance(outcome, Deferred):
        return outcome
    return succeed(outcome)
def timeout(deferred):
    """Default timeout handler for L{Deferred.setTimeout}: errback the
    deferred with a L{TimeoutError}."""
    deferred.errback(failure.Failure(TimeoutError("Callback timed out")))
def passthru(arg):
    """Identity function; used as the no-op link when only one side of a
    callback/errback pair is supplied."""
    return arg
def setDebugging(on):
    """Enable or disable Deferred debugging.

    When debugging is on, the call stacks from creation and invocation are
    recorded, and added to any AlreadyCalledErrors we raise.

    @param on: truth value; coerced to C{bool} and stored on the
        L{Deferred} class itself, so it affects all Deferreds.
    """
    Deferred.debug=bool(on)
def getDebugging():
    """Determine whether Deferred debugging is enabled.

    @return: the current class-wide C{Deferred.debug} flag.
    """
    return Deferred.debug
class Deferred:
    """This is a callback which will be put off until later.

    Why do we want this? Well, in cases where a function in a threaded
    program would block until it gets a result, for Twisted it should
    not block. Instead, it should return a Deferred.

    This can be implemented for protocols that run over the network by
    writing an asynchronous protocol for twisted.internet. For methods
    that come from outside packages that are not under our control, we use
    threads (see for example L{twisted.enterprise.adbapi}).

    For more information about Deferreds, see doc/howto/defer.html or
    U{http://twistedmatrix.com/projects/core/documentation/howto/defer.html}
    """
    called = 0            # becomes True once callback()/errback() has fired
    paused = 0            # pause() nesting depth; callbacks only run at 0
    timeoutCall = None    # DelayedCall created by the deprecated setTimeout()
    _debugInfo = None     # lazily-created DebugInfo when debugging is on

    # Are we currently running a user-installed callback?  Meant to prevent
    # recursive running of callbacks when a reentrant call to add a callback is
    # used.
    _runningCallbacks = False

    # Keep this class attribute for now, for compatibility with code that
    # sets it directly.
    debug = False


    def __init__(self):
        self.callbacks = []
        if self.debug:
            self._debugInfo = DebugInfo()
            # Remember where this Deferred was created (minus this frame).
            self._debugInfo.creator = traceback.format_stack()[:-1]


    def addCallbacks(self, callback, errback=None,
                     callbackArgs=None, callbackKeywords=None,
                     errbackArgs=None, errbackKeywords=None):
        """Add a pair of callbacks (success and error) to this Deferred.

        These will be executed when the 'master' callback is run.
        """
        assert callable(callback)
        assert errback == None or callable(errback)
        # Each entry in self.callbacks is a pair of (func, args, kw) triples;
        # index 0 is the success handler, index 1 the error handler.
        cbs = ((callback, callbackArgs, callbackKeywords),
               (errback or (passthru), errbackArgs, errbackKeywords))
        self.callbacks.append(cbs)

        if self.called:
            # Already fired: run the new callbacks immediately.
            self._runCallbacks()
        return self


    def addCallback(self, callback, *args, **kw):
        """Convenience method for adding just a callback.

        See L{addCallbacks}.
        """
        return self.addCallbacks(callback, callbackArgs=args,
                                 callbackKeywords=kw)


    def addErrback(self, errback, *args, **kw):
        """Convenience method for adding just an errback.

        See L{addCallbacks}.
        """
        return self.addCallbacks(passthru, errback,
                                 errbackArgs=args,
                                 errbackKeywords=kw)


    def addBoth(self, callback, *args, **kw):
        """Convenience method for adding a single callable as both a callback
        and an errback.

        See L{addCallbacks}.
        """
        return self.addCallbacks(callback, callback,
                                 callbackArgs=args, errbackArgs=args,
                                 callbackKeywords=kw, errbackKeywords=kw)


    def chainDeferred(self, d):
        """Chain another Deferred to this Deferred.

        This method adds callbacks to this Deferred to call d's callback or
        errback, as appropriate. It is merely a shorthand way of performing
        the following::

            self.addCallbacks(d.callback, d.errback)

        When you chain a deferred d2 to another deferred d1 with
        d1.chainDeferred(d2), you are making d2 participate in the callback
        chain of d1. Thus any event that fires d1 will also fire d2.
        However, the converse is B{not} true; if d2 is fired d1 will not be
        affected.
        """
        return self.addCallbacks(d.callback, d.errback)


    def callback(self, result):
        """Run all success callbacks that have been added to this Deferred.

        Each callback will have its result passed as the first
        argument to the next; this way, the callbacks act as a
        'processing chain'. Also, if the success-callback returns a Failure
        or raises an Exception, processing will continue on the *error*-
        callback chain.
        """
        # Yielding/firing with a Deferred is never valid; chain it instead.
        assert not isinstance(result, Deferred)
        self._startRunCallbacks(result)


    def errback(self, fail=None):
        """
        Run all error callbacks that have been added to this Deferred.

        Each callback will have its result passed as the first
        argument to the next; this way, the callbacks act as a
        'processing chain'. Also, if the error-callback returns a non-Failure
        or doesn't raise an Exception, processing will continue on the
        *success*-callback chain.

        If the argument that's passed to me is not a failure.Failure instance,
        it will be embedded in one. If no argument is passed, a failure.Failure
        instance will be created based on the current traceback stack.

        Passing a string as `fail' is deprecated, and will be punished with
        a warning message.

        @raise NoCurrentExceptionError: If C{fail} is C{None} but there is
            no current exception state.
        """
        if not isinstance(fail, failure.Failure):
            fail = failure.Failure(fail)

        self._startRunCallbacks(fail)


    def pause(self):
        """Stop processing on a Deferred until L{unpause}() is called.
        """
        self.paused = self.paused + 1


    def unpause(self):
        """Process all callbacks made since L{pause}() was called.
        """
        self.paused = self.paused - 1
        if self.paused:
            return
        if self.called:
            self._runCallbacks()


    def _continue(self, result):
        # Resumption hook used when a callback returned a Deferred: store the
        # inner result and pick up where the chain left off.
        self.result = result
        self.unpause()


    def _startRunCallbacks(self, result):
        if self.called:
            # Firing twice is a bug in the caller; include recorded stacks
            # in the error when debugging is enabled.
            if self.debug:
                if self._debugInfo is None:
                    self._debugInfo = DebugInfo()
                extra = "\n" + self._debugInfo._getDebugTracebacks()
                raise AlreadyCalledError(extra)
            raise AlreadyCalledError
        if self.debug:
            if self._debugInfo is None:
                self._debugInfo = DebugInfo()
            self._debugInfo.invoker = traceback.format_stack()[:-2]

        self.called = True
        self.result = result
        if self.timeoutCall:
            # A result arrived before the deprecated timeout fired; best-effort
            # cancellation (the call may already be running or cancelled).
            try:
                self.timeoutCall.cancel()
            except:
                pass

            del self.timeoutCall
        self._runCallbacks()


    def _runCallbacks(self):
        if self._runningCallbacks:
            # Don't recursively run callbacks
            return
        if not self.paused:
            while self.callbacks:
                item = self.callbacks.pop(0)
                # Select the success or error handler based on whether the
                # current result is a Failure (False->0, True->1).
                callback, args, kw = item[
                    isinstance(self.result, failure.Failure)]
                args = args or ()
                kw = kw or {}
                try:
                    self._runningCallbacks = True
                    try:
                        self.result = callback(self.result, *args, **kw)
                    finally:
                        self._runningCallbacks = False
                    if isinstance(self.result, Deferred):
                        # note: this will cause _runCallbacks to be called
                        # recursively if self.result already has a result.
                        # This shouldn't cause any problems, since there is no
                        # relevant state in this stack frame at this point.
                        # The recursive call will continue to process
                        # self.callbacks until it is empty, then return here,
                        # where there is no more work to be done, so this call
                        # will return as well.
                        self.pause()
                        self.result.addBoth(self._continue)
                        break
                except:
                    self.result = failure.Failure()

        if isinstance(self.result, failure.Failure):
            self.result.cleanFailure()
            if self._debugInfo is None:
                self._debugInfo = DebugInfo()
            # Record the unhandled failure so DebugInfo.__del__ can report it
            # if no errback ever consumes it.
            self._debugInfo.failResult = self.result
        else:
            if self._debugInfo is not None:
                self._debugInfo.failResult = None


    def setTimeout(self, seconds, timeoutFunc=timeout, *args, **kw):
        """Set a timeout function to be triggered if I am not called.

        @param seconds: How long to wait (from now) before firing the
            timeoutFunc.

        @param timeoutFunc: will receive the Deferred and *args, **kw as its
            arguments.  The default timeoutFunc will call the errback with a
            L{TimeoutError}.
        """
        warnings.warn(
            "Deferred.setTimeout is deprecated.  Look for timeout "
            "support specific to the API you are using instead.",
            DeprecationWarning, stacklevel=2)

        if self.called:
            return
        assert not self.timeoutCall, "Don't call setTimeout twice on the same Deferred."

        from twisted.internet import reactor
        self.timeoutCall = reactor.callLater(
            seconds,
            lambda: self.called or timeoutFunc(self, *args, **kw))
        return self.timeoutCall


    def __str__(self):
        cname = self.__class__.__name__
        if hasattr(self, 'result'):
            return "<%s at %s  current result: %r>" % (cname, hex(unsignedID(self)),
                                                       self.result)
        return "<%s at %s>" % (cname, hex(unsignedID(self)))
    __repr__ = __str__
class DebugInfo:
    """Deferred debug helper"""

    # Last known Failure result of the owning Deferred; non-None here at
    # garbage-collection time means the failure was never handled.
    failResult = None

    def _getDebugTracebacks(self):
        """Format the recorded creation/invocation stacks (if any) into a
        human-readable string with 'C:'/'I:' prefixes."""
        info = ''
        if hasattr(self, "creator"):
            info += " C: Deferred was created:\n C:"
            info += "".join(self.creator).rstrip().replace("\n","\n C:")
            info += "\n"
        if hasattr(self, "invoker"):
            info += " I: First Invoker was:\n I:"
            info += "".join(self.invoker).rstrip().replace("\n","\n I:")
            info += "\n"
        return info

    def __del__(self):
        """Print tracebacks and die.

        If the *last* (and I do mean *last*) callback leaves me in an error
        state, print a traceback (if said errback is a Failure).
        """
        if self.failResult is not None:
            log.msg("Unhandled error in Deferred:", isError=True)
            debugInfo = self._getDebugTracebacks()
            if debugInfo != '':
                log.msg("(debug: " + debugInfo + ")", isError=True)
            log.err(self.failResult)
class FirstError(Exception):
    """
    First error to occur in a L{DeferredList} if C{fireOnOneErrback} is set.

    @ivar subFailure: The L{Failure} that occurred.
    @type subFailure: L{Failure}

    @ivar index: The index of the L{Deferred} in the L{DeferredList} where
        it happened.
    @type index: C{int}
    """
    def __init__(self, failure, index):
        Exception.__init__(self, failure, index)
        self.subFailure = failure
        self.index = index


    def __repr__(self):
        """
        The I{repr} of L{FirstError} instances includes the repr of the
        wrapped failure's exception and the index of the L{FirstError}.
        """
        return 'FirstError[#%d, %r]' % (self.index, self.subFailure.value)


    def __str__(self):
        """
        The I{str} of L{FirstError} instances includes the I{str} of the
        entire wrapped failure (including its traceback and exception) and
        the index of the L{FirstError}.
        """
        return 'FirstError[#%d, %s]' % (self.index, self.subFailure)


    def __cmp__(self, other):
        """
        Comparison between L{FirstError} and other L{FirstError} instances
        is defined as the comparison of the index and sub-failure of each
        instance.  L{FirstError} instances don't compare equal to anything
        that isn't a L{FirstError} instance.

        @since: 8.2
        """
        # NOTE: __cmp__ and the cmp() builtin are Python 2 only; this module
        # predates Python 3 support.
        if isinstance(other, FirstError):
            return cmp(
                (self.index, self.subFailure),
                (other.index, other.subFailure))
        return -1
class DeferredList(Deferred):
    """I combine a group of deferreds into one callback.

    I track a list of L{Deferred}s for their callbacks, and make a single
    callback when they have all completed, a list of (success, result)
    tuples, 'success' being a boolean.

    Note that you can still use a L{Deferred} after putting it in a
    DeferredList.  For example, you can suppress 'Unhandled error in Deferred'
    messages by adding errbacks to the Deferreds *after* putting them in the
    DeferredList, as a DeferredList won't swallow the errors.  (Although a more
    convenient way to do this is simply to set the consumeErrors flag)
    """

    fireOnOneCallback = 0
    fireOnOneErrback = 0

    def __init__(self, deferredList, fireOnOneCallback=0, fireOnOneErrback=0,
                 consumeErrors=0):
        """Initialize a DeferredList.

        @type deferredList:  C{list} of L{Deferred}s
        @param deferredList: The list of deferreds to track.
        @param fireOnOneCallback: (keyword param) a flag indicating that
                             only one callback needs to be fired for me to call
                             my callback
        @param fireOnOneErrback: (keyword param) a flag indicating that
                            only one errback needs to be fired for me to call
                            my errback
        @param consumeErrors: (keyword param) a flag indicating that any errors
                            raised in the original deferreds should be
                            consumed by this DeferredList.  This is useful to
                            prevent spurious warnings being logged.
        """
        # One (success, result) slot per tracked deferred, filled in as
        # each one fires.
        self.resultList = [None] * len(deferredList)
        Deferred.__init__(self)
        if len(deferredList) == 0 and not fireOnOneCallback:
            self.callback(self.resultList)

        # These flags need to be set *before* attaching callbacks to the
        # deferreds, because the callbacks use these flags, and will run
        # synchronously if any of the deferreds are already fired.
        self.fireOnOneCallback = fireOnOneCallback
        self.fireOnOneErrback = fireOnOneErrback
        self.consumeErrors = consumeErrors
        self.finishedCount = 0

        index = 0
        for deferred in deferredList:
            deferred.addCallbacks(self._cbDeferred, self._cbDeferred,
                                  callbackArgs=(index,SUCCESS),
                                  errbackArgs=(index,FAILURE))
            index = index + 1

    def _cbDeferred(self, result, index, succeeded):
        """(internal) Callback for when one of my deferreds fires.
        """
        self.resultList[index] = (succeeded, result)

        self.finishedCount += 1
        if not self.called:
            if succeeded == SUCCESS and self.fireOnOneCallback:
                self.callback((result, index))
            elif succeeded == FAILURE and self.fireOnOneErrback:
                self.errback(failure.Failure(FirstError(result, index)))
            elif self.finishedCount == len(self.resultList):
                self.callback(self.resultList)

        if succeeded == FAILURE and self.consumeErrors:
            # Swallow the failure so the original deferred doesn't log an
            # 'Unhandled error in Deferred' warning.
            result = None

        return result
def _parseDListResult(l, fireOnOneErrback=0):
if __debug__:
for success, value in l:
assert success
return [x[1] for x in l]
def gatherResults(deferredList):
    """Returns list with result of given Deferreds.

    Builds on C{DeferredList}, but saves callers from unpacking the
    (success, result) tuples themselves; the first failure fires the
    errback.

    @type deferredList:  C{list} of L{Deferred}s
    """
    wrapper = DeferredList(deferredList, fireOnOneErrback=1)
    wrapper.addCallback(_parseDListResult)
    return wrapper
# Constants for use with DeferredList
# Each slot in a DeferredList result is a (status, value) pair tagged with
# one of these.
SUCCESS = True
FAILURE = False
## deferredGenerator
class waitForDeferred:
    """
    A marker object yielded from a L{deferredGenerator}-wrapped generator to
    indicate a Deferred whose result the generator wants to wait for.

    See L{deferredGenerator}.
    """

    def __init__(self, d):
        # Only genuine Deferreds may be waited on; fail loudly otherwise.
        if not isinstance(d, Deferred):
            raise TypeError("You must give waitForDeferred a Deferred. You gave it %r." % (d,))
        self.d = d


    def getResult(self):
        # self.result is assigned by _deferGenerator once self.d fires; a
        # Failure result is re-raised into the generator here.
        if isinstance(self.result, failure.Failure):
            self.result.raiseException()
        return self.result
def _deferGenerator(g, deferred):
    """
    Trampoline that drives a L{deferredGenerator}-style generator C{g},
    firing C{deferred} with the generator's final value (or failure).

    See L{deferredGenerator}.
    """
    result = None

    # This function is complicated by the need to prevent unbounded recursion
    # arising from repeatedly yielding immediately ready deferreds.  This while
    # loop and the waiting variable solve that by manually unfolding the
    # recursion.

    waiting = [True, # defgen is waiting for result?
               None] # result

    while 1:
        try:
            result = g.next()
        except StopIteration:
            # Generator exhausted: its last yielded value is the result.
            deferred.callback(result)
            return deferred
        except:
            deferred.errback()
            return deferred

        # Deferred.callback(Deferred) raises an error; we catch this case
        # early here and give a nicer error message to the user in case
        # they yield a Deferred. Perhaps eventually Deferred.callback(Deferred)
        # will just raise a TypeError, with this same error message.
        if isinstance(result, Deferred):
            return fail(TypeError("Yield waitForDeferred(d), not d!"))

        if isinstance(result, waitForDeferred):
            # a waitForDeferred was yielded, get the result.
            # Pass result in so it don't get changed going around the loop
            # This isn't a problem for waiting, as it's only reused if
            # gotResult has already been executed.
            def gotResult(r, result=result):
                result.result = r
                if waiting[0]:
                    # Still inside the while loop: hand the value back and
                    # let the loop continue synchronously.
                    waiting[0] = False
                    waiting[1] = r
                else:
                    # The loop already returned; restart the trampoline.
                    _deferGenerator(g, deferred)
            result.d.addBoth(gotResult)
            if waiting[0]:
                # Haven't called back yet, set flag so that we get reinvoked
                # and return from the loop
                waiting[0] = False
                return deferred
            # Reset waiting to initial values for next loop
            waiting[0] = True
            waiting[1] = None

            result = None
def deferredGenerator(f):
    """
    deferredGenerator and waitForDeferred help you write Deferred-using code
    that looks like a regular sequential function. If your code has a minimum
    requirement of Python 2.5, consider the use of L{inlineCallbacks} instead,
    which can accomplish the same thing in a more concise manner.

    There are two important functions involved: waitForDeferred, and
    deferredGenerator.  They are used together, like this::

        def thingummy():
            thing = waitForDeferred(makeSomeRequestResultingInDeferred())
            yield thing
            thing = thing.getResult()
            print thing #the result! hoorj!
        thingummy = deferredGenerator(thingummy)

    waitForDeferred returns something that you should immediately yield; when
    your generator is resumed, calling thing.getResult() will either give you
    the result of the Deferred if it was a success, or raise an exception if it
    was a failure.  Calling C{getResult} is B{absolutely mandatory}.  If you do
    not call it, I{your program will not work}.

    deferredGenerator takes one of these waitForDeferred-using generator
    functions and converts it into a function that returns a Deferred. The
    result of the Deferred will be the last value that your generator yielded
    unless the last value is a waitForDeferred instance, in which case the
    result will be C{None}.  If the function raises an unhandled exception, the
    Deferred will errback instead.  Remember that 'return result' won't work;
    use 'yield result; return' in place of that.

    Note that not yielding anything from your generator will make the Deferred
    result in None. Yielding a Deferred from your generator is also an error
    condition; always yield waitForDeferred(d) instead.

    The Deferred returned from your deferred generator may also errback if your
    generator raised an exception.  For example::

        def thingummy():
            thing = waitForDeferred(makeSomeRequestResultingInDeferred())
            yield thing
            thing = thing.getResult()
            if thing == 'I love Twisted':
                # will become the result of the Deferred
                yield 'TWISTED IS GREAT!'
                return
            else:
                # will trigger an errback
                raise Exception('DESTROY ALL LIFE')
        thingummy = deferredGenerator(thingummy)

    Put succinctly, these functions connect deferred-using code with this 'fake
    blocking' style in both directions: waitForDeferred converts from a
    Deferred to the 'blocking' style, and deferredGenerator converts from the
    'blocking' style to a Deferred.
    """
    def unwindGenerator(*args, **kwargs):
        # Start the generator and hand it to the trampoline together with
        # the Deferred that will carry its eventual result.
        return _deferGenerator(f(*args, **kwargs), Deferred())
    # Preserve f's name/docstring on the wrapper.
    return mergeFunctionMetadata(f, unwindGenerator)
## inlineCallbacks
# BaseException is only in Py 2.5.
try:
    BaseException
except NameError:
    # Python < 2.5: fall back to Exception as the root of the hierarchy.
    BaseException=Exception
class _DefGen_Return(BaseException):
    """Carrier exception used by L{returnValue} to smuggle a return value
    out of an L{inlineCallbacks} generator.  Derives from BaseException so
    that generic C{except Exception:} clauses do not swallow it."""
    def __init__(self, value):
        self.value = value
def returnValue(val):
    """
    Return val from a L{inlineCallbacks} generator.

    Note: this is currently implemented by raising an exception
    derived from BaseException.  You might want to change any
    'except:' clauses to an 'except Exception:' clause so as not to
    catch this exception.

    Also: while this function currently will work when called from
    within arbitrary functions called from within the generator, do
    not rely upon this behavior.

    @param val: the value the surrounding generator's Deferred should fire
        with.
    """
    raise _DefGen_Return(val)
def _inlineCallbacks(result, g, deferred):
    """
    Trampoline that drives an L{inlineCallbacks} generator C{g}, sending
    C{result} into it and firing C{deferred} with its eventual outcome.

    See L{inlineCallbacks}.
    """
    # This function is complicated by the need to prevent unbounded recursion
    # arising from repeatedly yielding immediately ready deferreds.  This while
    # loop and the waiting variable solve that by manually unfolding the
    # recursion.

    waiting = [True, # waiting for result?
               None] # result

    while 1:
        try:
            # Send the last result back as the result of the yield expression.
            if isinstance(result, failure.Failure):
                result = result.throwExceptionIntoGenerator(g)
            else:
                result = g.send(result)
        except StopIteration:
            # fell off the end, or "return" statement
            deferred.callback(None)
            return deferred
        except _DefGen_Return, e:
            # returnValue call
            deferred.callback(e.value)
            return deferred
        except:
            deferred.errback()
            return deferred

        if isinstance(result, Deferred):
            # a deferred was yielded, get the result.
            def gotResult(r):
                if waiting[0]:
                    # Still inside the while loop: hand the value back and
                    # let the loop continue synchronously.
                    waiting[0] = False
                    waiting[1] = r
                else:
                    # The loop already returned; restart the trampoline.
                    _inlineCallbacks(r, g, deferred)

            result.addBoth(gotResult)
            if waiting[0]:
                # Haven't called back yet, set flag so that we get reinvoked
                # and return from the loop
                waiting[0] = False
                return deferred

            result = waiting[1]
            # Reset waiting to initial values for next loop.  gotResult uses
            # waiting, but this isn't a problem because gotResult is only
            # executed once, and if it hasn't been executed yet, the return
            # branch above would have been taken.


            waiting[0] = True
            waiting[1] = None


    return deferred
def inlineCallbacks(f):
    """
    WARNING: this function will not work in Python 2.4 and earlier!

    inlineCallbacks helps you write Deferred-using code that looks like a
    regular sequential function. This function uses features of Python 2.5
    generators.  If you need to be compatible with Python 2.4 or before, use
    the L{deferredGenerator} function instead, which accomplishes the same
    thing, but with somewhat more boilerplate.  For example::

        def thingummy():
            thing = yield makeSomeRequestResultingInDeferred()
            print thing #the result! hoorj!
        thingummy = inlineCallbacks(thingummy)

    When you call anything that results in a Deferred, you can simply yield it;
    your generator will automatically be resumed when the Deferred's result is
    available. The generator will be sent the result of the Deferred with the
    'send' method on generators, or if the result was a failure, 'throw'.

    Your inlineCallbacks-enabled generator will return a Deferred object, which
    will result in the return value of the generator (or will fail with a
    failure object if your generator raises an unhandled exception). Note that
    you can't use 'return result' to return a value; use 'returnValue(result)'
    instead. Falling off the end of the generator, or simply using 'return'
    will cause the Deferred to have a result of None.

    The Deferred returned from your deferred generator may errback if your
    generator raised an exception::

        def thingummy():
            thing = yield makeSomeRequestResultingInDeferred()
            if thing == 'I love Twisted':
                # will become the result of the Deferred
                returnValue('TWISTED IS GREAT!')
            else:
                # will trigger an errback
                raise Exception('DESTROY ALL LIFE')
        thingummy = inlineCallbacks(thingummy)
    """
    def unwindGenerator(*args, **kwargs):
        # Start the generator with no initial value and drive it with a
        # fresh Deferred that will carry its eventual result.
        return _inlineCallbacks(None, f(*args, **kwargs), Deferred())
    # Preserve f's name/docstring on the wrapper.
    return mergeFunctionMetadata(f, unwindGenerator)
## DeferredLock/DeferredQueue
class _ConcurrencyPrimitive(object):
    """
    Shared base for L{DeferredLock} and L{DeferredSemaphore}: keeps the
    queue of waiting Deferreds and provides the acquire/run/release
    convenience wrapper.  Subclasses supply C{acquire} and C{release}.
    """
    def __init__(self):
        # Deferreds handed out by acquire() while the primitive was busy.
        self.waiting = []


    def _releaseAndReturn(self, r):
        # Release on the way out and pass the callback/errback result along
        # unchanged.
        self.release()
        return r


    def run(*args, **kwargs):
        """Acquire, run, release.

        This function takes a callable as its first argument and any
        number of other positional and keyword arguments.  When the
        lock or semaphore is acquired, the callable will be invoked
        with those arguments.

        The callable may return a Deferred; if it does, the lock or
        semaphore won't be released until that Deferred fires.

        @return: Deferred of function result.
        """
        # 'self' and the callable are extracted from *args by hand so that
        # **kwargs can be forwarded to the callable completely untouched.
        if len(args) < 2:
            if not args:
                raise TypeError("run() takes at least 2 arguments, none given.")
            raise TypeError("%s.run() takes at least 2 arguments, 1 given" % (
                args[0].__class__.__name__,))
        self, f = args[:2]
        args = args[2:]

        def execute(ignoredResult):
            d = maybeDeferred(f, *args, **kwargs)
            d.addBoth(self._releaseAndReturn)
            return d

        d = self.acquire()
        d.addCallback(execute)
        return d
class DeferredLock(_ConcurrencyPrimitive):
    """
    A lock for event driven systems.

    @ivar locked: True when this Lock has been acquired, false at all
      other times.  Do not change this value, but it is useful to
      examine for the equivalent of a \"non-blocking\" acquisition.
    """

    locked = 0

    def acquire(self):
        """Attempt to acquire the lock.

        @return: a Deferred which fires on lock acquisition.
        """
        d = Deferred()
        if not self.locked:
            # Free: take the lock and fire immediately.
            self.locked = 1
            d.callback(self)
        else:
            # Busy: queue up until release() hands the lock over.
            self.waiting.append(d)
        return d

    def release(self):
        """Release the lock.

        Should be called by whomever did the acquire() when the shared
        resource is free.
        """
        assert self.locked, "Tried to release an unlocked lock"
        self.locked = 0
        if self.waiting:
            # Someone is queued: hand the lock straight to the next waiter.
            self.locked = 1
            waiter = self.waiting.pop(0)
            waiter.callback(self)
class DeferredSemaphore(_ConcurrencyPrimitive):
    """
    A semaphore for event driven systems.
    """

    def __init__(self, tokens):
        _ConcurrencyPrimitive.__init__(self)
        # Remaining tokens, and the fixed ceiling we may never exceed.
        self.tokens = tokens
        self.limit = tokens

    def acquire(self):
        """Attempt to acquire the token.

        @return: a Deferred which fires on token acquisition.
        """
        assert self.tokens >= 0, "Internal inconsistency?? tokens should never be negative"
        d = Deferred()
        if self.tokens:
            # A token is available: consume it and fire immediately.
            self.tokens = self.tokens - 1
            d.callback(self)
        else:
            # Exhausted: queue up until release() hands a token over.
            self.waiting.append(d)
        return d

    def release(self):
        """Release the token.

        Should be called by whoever did the acquire() when the shared
        resource is free.
        """
        assert self.tokens < self.limit, "Someone released me too many times: too many tokens!"
        self.tokens = self.tokens + 1
        if self.waiting:
            # Hand the fresh token straight to the next waiter.
            self.tokens = self.tokens - 1
            waiter = self.waiting.pop(0)
            waiter.callback(self)
class QueueOverflow(Exception):
    """Raised by L{DeferredQueue.put} when the queue already holds C{size}
    objects."""
    pass
class QueueUnderflow(Exception):
    """Raised by L{DeferredQueue.get} when more than C{backlog} Deferreds
    are already waiting for an object."""
    pass
class DeferredQueue(object):
    """
    An event driven queue.

    Objects may be added as usual to this queue.  When an attempt is
    made to retrieve an object when the queue is empty, a Deferred is
    returned which will fire when an object becomes available.

    @ivar size: The maximum number of objects to allow into the queue
    at a time.  When an attempt to add a new object would exceed this
    limit, QueueOverflow is raised synchronously.  None for no limit.

    @ivar backlog: The maximum number of Deferred gets to allow at
    one time.  When an attempt is made to get an object which would
    exceed this limit, QueueUnderflow is raised synchronously.  None
    for no limit.
    """

    def __init__(self, size=None, backlog=None):
        self.waiting = []   # Deferreds from get() calls awaiting an object
        self.pending = []   # objects put() before anyone asked for them
        self.size = size
        self.backlog = backlog

    def put(self, obj):
        """Add an object to this queue.

        @raise QueueOverflow: Too many objects are in this queue.
        """
        if self.waiting:
            # A consumer is already blocked; hand the object straight over.
            waiter = self.waiting.pop(0)
            waiter.callback(obj)
        elif self.size is None or len(self.pending) < self.size:
            self.pending.append(obj)
        else:
            raise QueueOverflow()

    def get(self):
        """Attempt to retrieve and remove an object from the queue.

        @return: a Deferred which fires with the next object available in the queue.

        @raise QueueUnderflow: Too many (more than C{backlog})
        Deferreds are already waiting for an object from this queue.
        """
        if self.pending:
            # An object is already buffered; deliver it synchronously.
            return succeed(self.pending.pop(0))
        if self.backlog is None or len(self.waiting) < self.backlog:
            d = Deferred()
            self.waiting.append(d)
            return d
        raise QueueUnderflow()
class AlreadyTryingToLockError(Exception):
    """
    Raised when DeferredFilesystemLock.deferUntilLocked is called twice on a
    single DeferredFilesystemLock.

    Note: delivered to the caller as a failed Deferred rather than raised
    synchronously.
    """
class DeferredFilesystemLock(lockfile.FilesystemLock):
    """
    A FilesystemLock that allows for a deferred to be fired when the lock is
    acquired.

    @ivar _scheduler: The object in charge of scheduling retries. In this
        implementation this is parameterized for testing.

    @ivar _interval: The retry interval for an L{IReactorTime} based scheduler.

    @ivar _tryLockCall: A L{DelayedCall} based on C{_interval} that will manage
        the next retry for acquiring the lock.

    @ivar _timeoutCall: A L{DelayedCall} based on deferUntilLocked's timeout
        argument.  This is in charge of timing out our attempt to acquire the
        lock.
    """
    _interval = 1
    _tryLockCall = None
    _timeoutCall = None

    def __init__(self, name, scheduler=None):
        """
        @param name: The name of the lock to acquire
        @param scheduler: An object which provides L{IReactorTime}
        """
        lockfile.FilesystemLock.__init__(self, name)

        if scheduler is None:
            from twisted.internet import reactor
            scheduler = reactor

        self._scheduler = scheduler

    def deferUntilLocked(self, timeout=None):
        """
        Wait until we acquire this lock.  This method is not safe for
        concurrent use.

        @type timeout: C{float} or C{int}
        @param timeout: the number of seconds after which to time out if the
            lock has not been acquired.

        @return: a deferred which will callback when the lock is acquired, or
            errback with a L{TimeoutError} after timing out or an
            L{AlreadyTryingToLockError} if the L{deferUntilLocked} has already
            been called and not successfully locked the file.
        """
        if self._tryLockCall is not None:
            # A previous deferUntilLocked is still retrying.
            return fail(
                AlreadyTryingToLockError(
                    "deferUntilLocked isn't safe for concurrent use."))

        d = Deferred()

        def _cancelLock():
            # Timeout handler; only ever scheduled when timeout is not None,
            # so the %f format below is safe.
            self._tryLockCall.cancel()
            self._tryLockCall = None
            self._timeoutCall = None

            # One last attempt before giving up.
            if self.lock():
                d.callback(None)
            else:
                # Fixed typo in the user-visible message ("aquiring").
                d.errback(failure.Failure(
                    TimeoutError("Timed out acquiring lock: %s after %fs" % (
                        self.name,
                        timeout))))

        def _tryLock():
            if self.lock():
                # Got it: cancel any pending timeout and fire.
                if self._timeoutCall is not None:
                    self._timeoutCall.cancel()
                    self._timeoutCall = None

                self._tryLockCall = None

                d.callback(None)
            else:
                # Still held elsewhere: arm the timeout (once) and retry.
                if timeout is not None and self._timeoutCall is None:
                    self._timeoutCall = self._scheduler.callLater(
                        timeout, _cancelLock)

                self._tryLockCall = self._scheduler.callLater(
                    self._interval, _tryLock)

        _tryLock()

        return d
# Names exported by this module via ``from ... import *``.
__all__ = ["Deferred", "DeferredList", "succeed", "fail", "FAILURE", "SUCCESS",
           "AlreadyCalledError", "TimeoutError", "gatherResults",
           "maybeDeferred",
           "waitForDeferred", "deferredGenerator", "inlineCallbacks",
           "returnValue",
           "DeferredLock", "DeferredSemaphore", "DeferredQueue",
           "DeferredFilesystemLock", "AlreadyTryingToLockError",
          ]
| gpl-3.0 |
adwu73/robotframework-selenium2library | test/lib/mockito/mockito.py | 70 | 3297 | #!/usr/bin/env python
# coding: utf-8
import verification
from mocking import mock, TestDouble
from mock_registry import mock_registry
from verification import VerificationError
__copyright__ = "Copyright 2008-2010, Mockito Contributors"
__license__ = "MIT"
__maintainer__ = "Mockito Maintainers"
__email__ = "mockito-python@googlegroups.com"
class ArgumentError(Exception):
    """Raised by verify() when its count arguments are invalid or conflict."""
    pass
def _multiple_arguments_in_use(*args):
return len(filter(lambda x: x, args)) > 1
def _invalid_argument(value):
return (value is not None and value < 1) or value == 0
def _invalid_between(between):
if between is not None:
start, end = between
if start > end or start < 0:
return True
return False
def verify(obj, times=1, atleast=None, atmost=None, between=None, inorder=False):
    """Return a verification-mode proxy for *obj*.

    :param obj: a TestDouble, or a real object whose registered mock is
        looked up in ``mock_registry``.
    :param times: exact number of expected invocations (default 1).
    :param atleast: minimum number of invocations; mutually exclusive with
        ``atmost`` and ``between``.
    :param atmost: maximum number of invocations; mutually exclusive with
        ``atleast`` and ``between``.
    :param between: ``(start, end)`` pair with ``0 <= start <= end``;
        mutually exclusive with ``atleast`` and ``atmost``.
    :param inorder: when True, additionally require the recorded order.
    :raises ArgumentError: on invalid or conflicting count arguments.
    """
    if times < 0:
        raise ArgumentError("""'times' argument has invalid value.
It should be at least 0. You wanted to set it to: %i""" % times)
    if _multiple_arguments_in_use(atleast, atmost, between):
        raise ArgumentError("""Sure you know what you are doing?
You can set only one of the arguments: 'atleast', 'atmost' or 'between'.""")
    if _invalid_argument(atleast):
        raise ArgumentError("""'atleast' argument has invalid value.
It should be at least 1. You wanted to set it to: %i""" % atleast)
    if _invalid_argument(atmost):
        raise ArgumentError("""'atmost' argument has invalid value.
It should be at least 1. You wanted to set it to: %i""" % atmost)
    if _invalid_between(between):
        # BUGFIX: the old message said "second number not greater than
        # first", contradicting both _invalid_between() (which rejects
        # start > end) and the examples below; a range is valid when
        # start <= end.
        raise ArgumentError("""'between' argument has invalid value.
It should consist of positive values with first number not greater than second
e.g. [1, 4] or [0, 3] or [2, 2]
You wanted to set it to: %s""" % between)

    if isinstance(obj, TestDouble):
        mocked_object = obj
    else:
        mocked_object = mock_registry.mock_for(obj)

    # Install the requested verification strategy on the mock.
    if atleast:
        mocked_object.verification = verification.AtLeast(atleast)
    elif atmost:
        mocked_object.verification = verification.AtMost(atmost)
    elif between:
        mocked_object.verification = verification.Between(*between)
    else:
        mocked_object.verification = verification.Times(times)
    if inorder:
        mocked_object.verification = verification.InOrder(mocked_object.verification)
    return mocked_object
def when(obj, strict=True):
    """Begin stubbing an invocation on *obj*.

    *obj* may already be a mock; otherwise its registered mock is looked
    up, and if none exists a new mock wrapping the real object is created.
    """
    target = obj if isinstance(obj, mock) else mock_registry.mock_for(obj)
    if target is None:
        target = mock(obj, strict=strict)
        # Calling when() on something that is not a TestDouble means we're
        # trying to stub a real object (class, module etc.).  Not to be
        # confused with generating stubs from real classes.
        target.stubbing_real_object = True
    target.expect_stubbing()
    return target
def unstub():
    """Unstubs all stubbed methods and functions"""
    # Delegates to the global registry, which restores every real object
    # previously modified by stubbing.
    mock_registry.unstub_all()
def verifyNoMoreInteractions(*mocks):
    """Raise VerificationError if any recorded invocation on the given
    mocks has not been verified."""
    for double in mocks:
        unverified = [inv for inv in double.invocations if not inv.verified]
        if unverified:
            raise VerificationError("\nUnwanted interaction: " + str(unverified[0]))
def verifyZeroInteractions(*mocks):
    # A mock with zero verified interactions must have no recorded
    # invocations at all, so the "no more interactions" check applies.
    verifyNoMoreInteractions(*mocks)
| apache-2.0 |
trailbehind/RenderProgressTracker | RenderProgress/models.py | 1 | 1181 | from django.contrib.gis.db import models
import datetime
class Dataset(models.Model):
    """A named dataset that render blocks belong to (see RenderBlock.dataset)."""

    # Human-readable display name.
    name = models.CharField(max_length=100)
    # Stable external identifier; doubles as the primary key.
    identifier = models.CharField(max_length=50, unique=True, primary_key=True, db_index=True)
    # GeoManager enables GeoDjango spatial queries on related geometry.
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
# (stored value, human-readable label) pairs for RenderBlock.state.
PROGRESS_CHOICES = (
    ('running', 'Running'),
    ('complete', 'Complete'),
    ('failed', 'Failed'),
)
class RenderBlock(models.Model):
    """A unit of render work, tracked by state, belonging to a Dataset."""

    # Set once in save() when the row is first persisted.
    created = models.DateTimeField(editable=False)
    # Refreshed on every save().
    modified = models.DateTimeField()
    identifier = models.CharField(max_length=20)
    # One of PROGRESS_CHOICES; nullable so "not started" is representable.
    state = models.CharField(max_length=20, choices=PROGRESS_CHOICES, blank=True, null=True)
    source = models.CharField(max_length=200)
    # Geographic extent of this block, if known.
    bounds = models.PolygonField(blank=True, null=True)
    dataset = models.ForeignKey(Dataset, related_name="blocks", db_index=True)
    objects = models.GeoManager()

    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        # An unsaved row has no primary key yet, which is how creation is
        # detected here.  NOTE(review): datetime.today() returns a naive
        # local datetime -- confirm whether timezone-aware timestamps are
        # expected before relying on these values across timezones.
        if not self.id:
            self.created = datetime.datetime.today()
        self.modified = datetime.datetime.today()
        return super(RenderBlock, self).save(*args, **kwargs)
| mit |
prospwro/odoo | addons/sale_mrp/tests/test_move_explode.py | 225 | 5190 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMoveExplode(common.TransactionCase):
    """Checks that stock moves for a product with a phantom BoM explode into
    moves for the BoM's components, and that BoM lookup/explosion honour
    mrp properties."""

    def setUp(self):
        super(TestMoveExplode, self).setUp()
        cr, uid = self.cr, self.uid

        # Useful models
        self.ir_model_data = self.registry('ir.model.data')
        self.sale_order_line = self.registry('sale.order.line')
        self.sale_order = self.registry('sale.order')
        self.mrp_bom = self.registry('mrp.bom')
        self.product = self.registry('product.product')

        # product that has a phantom bom
        self.product_bom_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_3')[1]
        # bom with that product
        self.bom_id = self.ir_model_data.get_object_reference(cr, uid, 'mrp', 'mrp_bom_9')[1]
        # partner agrolait
        self.partner_id = self.ir_model_data.get_object_reference(cr, uid, 'base', 'res_partner_1')[1]
        # bom: PC Assemble (with property: DDR 512MB)
        self.bom_prop_id = self.ir_model_data.get_object_reference(cr, uid, 'mrp', 'mrp_bom_property_0')[1]
        self.template_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_3_product_template')[1]
        # property: DDR 512MB
        self.mrp_property_id = self.ir_model_data.get_object_reference(cr, uid, 'mrp', 'mrp_property_0')[1]
        # product: RAM SR2
        self.product_bom_prop_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_14')[1]
        # phantom bom for RAM SR2 with three lines containing properties
        self.bom_prop_line_id = self.ir_model_data.get_object_reference(cr, uid, 'mrp', 'mrp_bom_property_line')[1]
        # product: iPod included in the phantom bom
        self.product_A_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_11')[1]
        # product: Mouse, Wireless included in the phantom bom
        self.product_B_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_12')[1]

    def test_00_sale_move_explode(self):
        """check that when creating a sale order with a product that has a phantom BoM, move explode into content of the
        BoM"""
        cr, uid, context = self.cr, self.uid, {}
        # create sale order with one sale order line containing product with a phantom bom
        so_id = self.sale_order.create(cr, uid, vals={'partner_id': self.partner_id}, context=context)
        self.sale_order_line.create(cr, uid, values={'order_id': so_id, 'product_id': self.product_bom_id, 'product_uom_qty': 1}, context=context)
        # confirm sale order
        self.sale_order.action_button_confirm(cr, uid, [so_id], context=context)
        # get all move associated to that sale_order
        browse_move_ids = self.sale_order.browse(cr, uid, so_id, context=context).picking_ids[0].move_lines
        move_ids = [x.id for x in browse_move_ids]
        # we should have same amount of moves as components in the phantom bom
        bom = self.mrp_bom.browse(cr, uid, self.bom_id, context=context)
        bom_component_length = self.mrp_bom._bom_explode(cr, uid, bom, self.product_bom_id, 1, [])
        self.assertEqual(len(move_ids), len(bom_component_length[0]))

    def test_00_bom_find(self):
        """Check that _bom_find searches the bom corresponding to the properties passed or takes the bom with the smallest
        sequence."""
        cr, uid, context = self.cr, self.uid, {}
        res_id = self.mrp_bom._bom_find(cr, uid, product_tmpl_id=self.template_id, product_id=None, properties=[self.mrp_property_id], context=context)
        self.assertEqual(res_id, self.bom_prop_id)

    def test_00_bom_explode(self):
        """Check that _bom_explode only takes the lines with the right properties."""
        cr, uid, context = self.cr, self.uid, {}
        bom = self.mrp_bom.browse(cr, uid, self.bom_prop_line_id)
        product = self.product.browse(cr, uid, self.product_bom_prop_id)
        res = self.mrp_bom._bom_explode(cr, uid, bom, product, 1, properties=[self.mrp_property_id], context=context)
        # compare only the set of exploded product ids
        res = set([p['product_id'] for p in res[0]])
        self.assertEqual(res, set([self.product_A_id, self.product_B_id]))
| agpl-3.0 |
rikirenz/inspire-next | inspirehep/modules/forms/field_base.py | 6 | 10404 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Implementation of validators, post-processors and auto-complete functions.
Validators
----------
Following is a short overview over how validators may be defined for fields.
Inline validators (always executed)::
class MyForm(...):
myfield = MyField()
def validate_myfield(form, field):
raise ValidationError("Message")
External validators (always executed)::
def my_validator(form, field):
raise ValidationError("Message")
class MyForm(...):
myfield = MyField(validators=[my_validator])
Field defined validators (always executed)::
class MyField(...):
# ...
def pre_validate(self, form):
raise ValidationError("Message")
Default field validators (executed only if external validators are not
defined)::
class MyField(...):
def __init__(self, **kwargs):
defaults = dict(validators=[my_validator])
defaults.update(kwargs)
super(MyField, self).__init__(**defaults)
See http://wtforms.simplecodes.com/docs/1.0.4/validators.html for how to
write validators.
Post-processors
---------------
Post-processors follow the same pattern as validators. You may thus specify:
- Inline processors:::
Form.post_process_<field>(form, field)
- External processors:::
def my_processor(form, field):
...
myfield = MyField(processors=[my_processor])
- Field defined processors (please method documentation):::
Field.post_process(self, form, extra_processors=[])
Auto-complete
-------------
- External auto-completion function:::
def my_autocomplete(form, field, limit=50):
...
myfield = MyField(autocomplete=my_autocomplete)
- Field defined auto-completion function (please method documentation):::
Field.autocomplete(self, form, limit=50)
"""
from __future__ import absolute_import, division, print_function
import warnings
from wtforms import Field
from .form import CFG_FIELD_FLAGS
class INSPIREField(Field):
    """Base field that all webdeposit fields must inherit from."""

    def __init__(self, *args, **kwargs):
        """
        Initialize WebDeposit field.

        Every field is associated with a marc field. To define this association
        you have to specify the `export_key` for the recordext `Reader` or the
        `cook_function` (for more complicated fields).

        :param placeholder: str, Placeholder text for input fields.
        :param icon: Name of icon (rendering of the icon is done by templates)
        :type icon: str
        :param autocomplete: callable, A function to auto-complete values
            for field.
        :param processors: list of callables, List of processors to run
            for field.
        :param validators: list of callables, List of WTForm validators.
            If no validators are provided, validators defined
            in webdeposit_config will be loaded.
        :param hidden: Set to true to hide field. Default: False
        :type hidden: bool
        :param disabled: Set to true to disable field. Default: False
        :type disabled: bool
        :param export_key: Name of key to use during export
        :type export_key: str or callable
        :param preamble: Short text that should appear in before the field,
            usually meant for longer explanations.
        :type preamble: str
        :see http://wtforms.simplecodes.com/docs/1.0.4/validators.html for
            how to write validators.
        :see http://wtforms.simplecodes.com/docs/1.0.4/fields.html for further
            keyword argument that can be provided on field initialization.
        """
        # Pop WebDeposit specific kwargs before calling super()
        self.placeholder = kwargs.pop('placeholder', None)
        self.group = kwargs.pop('group', None)
        self.icon = kwargs.pop('icon', None)
        self.autocomplete = kwargs.pop('autocomplete', None)
        self.autocomplete_fn = kwargs.pop('autocomplete_fn', None)
        self.processors = kwargs.pop('processors', None)
        self.export_key = kwargs.pop('export_key', None)
        self.widget_classes = kwargs.pop('widget_classes', None)
        self.autocomplete_limit = kwargs.pop('autocomplete_limit', 20)
        self.readonly = kwargs.pop('readonly', None)
        self.preamble = kwargs.pop('preamble', None)

        # Initialize empty message variables, which are usually modified
        # during the post-processing phases.
        self._messages = []
        self._message_state = ''

        # Get flag values (e.g. hidden, disabled) before super() call.
        # See CFG_FIELD_FLAGS for all defined flags.
        flag_values = {}
        for flag in CFG_FIELD_FLAGS:
            flag_values[flag] = kwargs.pop(flag, False)

        # Call super-constructor.
        super(INSPIREField, self).__init__(*args, **kwargs)

        # Set flag values after super() call to ensure flags set during
        # super() are overwritten.
        for flag, value in flag_values.items():
            if value:
                setattr(self.flags, flag, True)

        if callable(self.autocomplete):
            warnings.warn("Autocomplete functions use now "
                          "'autocomplete_fn' attribute",
                          DeprecationWarning)
            self.autocomplete_fn = self.autocomplete
            self.autocomplete = None

    def __call__(self, *args, **kwargs):
        """Set custom keyword arguments when rendering field."""
        if 'placeholder' not in kwargs and self.placeholder:
            kwargs['placeholder'] = self.placeholder
        if 'disabled' not in kwargs and self.flags.disabled:
            kwargs['disabled'] = "disabled"
        # Append our widget classes to any caller-supplied class string.
        if 'class_' in kwargs and self.widget_classes:
            kwargs['class_'] = kwargs['class_'] + self.widget_classes
        elif self.widget_classes:
            kwargs['class_'] = self.widget_classes
        if self.autocomplete:
            kwargs['data-autocomplete'] = self.autocomplete
            kwargs['data-autocomplete-limit'] = self.autocomplete_limit
        elif self.autocomplete_fn:
            kwargs['data-autocomplete'] = "default"
        if self.readonly:
            kwargs['readonly'] = self.readonly
        return super(INSPIREField, self).__call__(*args, **kwargs)

    def reset_field_data(self, exclude=()):
        """Reset the ``fields.data`` value to that of ``field.object_data``.

        Usually not called directly, but rather through Form.reset_field_data()

        :param exclude: iterable of formfield names to exclude.
        """
        # BUGFIX: the default used to be a shared mutable list ([]); an
        # immutable tuple avoids the mutable-default-argument pitfall with
        # identical membership-test behaviour.
        if self.name not in exclude:
            self.data = self.object_data

    def post_process(self, form=None, formfields=None, extra_processors=None,
                     submit=False):
        """Post process form before saving.

        Usually you can do some of the following tasks in the post
        processing:

        * Set field flags (e.g. self.flags.hidden = True or
          form.<field>.flags.hidden = True).
        * Set messages (e.g. self.messages.append('text') and
          self.message_state = 'info').
        * Set values of other fields (e.g. form.<field>.data = '').

        Processors may stop the processing chain by raising StopIteration.

        IMPORTANT: By default the method will execute custom post processors
        defined in the webdeposit_config. If you override the method, be
        sure to call this method to ensure extra processors are called::

            super(MyField, self).post_process(
                form, extra_processors=extra_processors
            )
        """
        # BUGFIX: the list defaults used to be shared mutable lists ([]).
        # Normalizing None to fresh lists preserves the old behaviour
        # (processors still receive a list in ``fields``) without the
        # mutable-default-argument pitfall.
        if formfields is None:
            formfields = []
        if extra_processors is None:
            extra_processors = []
        # Check if post processing should run for this field
        if self.name in formfields or not formfields:
            stop = False
            for p in (self.processors or []):
                try:
                    p(form, self, submit=submit, fields=formfields)
                except StopIteration:
                    stop = True
                    break
            if not stop:
                for p in (extra_processors or []):
                    p(form, self, submit=submit, fields=formfields)

    def perform_autocomplete(self, form, name, term, limit=50):
        """Run auto-complete method for field.

        This method should not be called directly, instead use
        Form.autocomplete().
        """
        if name == self.name and self.autocomplete_fn:
            return self.autocomplete_fn(form, self, term, limit=limit)
        return None

    def add_message(self, msg, state=None):
        """Add a message.

        :param msg: The message to set
        :param state: State of message; info, warning, error, success.
        """
        self._messages.append(msg)
        if state:
            self._message_state = state

    def set_flags(self, flags):
        """Set field flags from a mapping of field name to flag names."""
        field_flags = flags.get(self.name, [])
        for check_flag in CFG_FIELD_FLAGS:
            setattr(self.flags, check_flag, check_flag in field_flags)

    @property
    def messages(self):
        """Retrieve field messages."""
        # Validation errors take precedence over post-processing messages.
        if self.errors:
            return {self.name: dict(
                state='error',
                messages=self.errors
            )}
        else:
            return {self.name: dict(
                state=getattr(self, '_message_state', ''),
                messages=self._messages
            )}
JeremyGrosser/quisk | src/usb/_interop.py | 4 | 4606 | # Copyright (C) 2009-2010 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
# All the hacks necessary to assure compatibility across all
# supported versions come here.
# Please, note that there is one version check for each
# hack we need to do, this makes maintenance easier... ^^
import sys
__all__ = ['_reduce', '_set', '_next', '_groupby', '_sorted', '_update_wrapper']

# we support Python >= 2.3
assert sys.hexversion >= 0x020300f0

# On Python 3, reduce became a functools module function
try:
    import functools
    _reduce = functools.reduce
except (ImportError, AttributeError):
    # Python 2: fall back to the builtin reduce().
    _reduce = reduce

# fall back to the sets module where the builtin set type is unavailable
try:
    _set = set
except NameError:
    import sets
    _set = sets.Set
# On Python >= 2.6, we have the builtin next() function
# On Python 2.5 and before, we have to call the iterator method next()
def _next(iter):
    # NOTE: the parameter intentionally shadows the builtin iter(); kept
    # for compatibility with existing callers.
    try:
        return next(iter)
    except NameError:
        # next() builtin is missing (Python < 2.6): use the iterator's
        # own next() method instead.
        return iter.next()
# groupby is available only since 2.4 version
try:
    import itertools
    _groupby = itertools.groupby
except (ImportError, AttributeError):
    # Pure-Python fallback, stolen from the Python docs.
    class _groupby(object):
        # [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
        # [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
        def __init__(self, iterable, key=None):
            # Default key is the identity function.
            if key is None:
                key = lambda x: x
            self.keyfunc = key
            self.it = iter(iterable)
            # Fresh object() compares unequal to any real key, so the
            # first next() call always starts a new group.
            self.tgtkey = self.currkey = self.currvalue = object()
        def __iter__(self):
            return self
        def next(self):
            # Skip any values left over from a group the caller did not
            # fully consume.
            while self.currkey == self.tgtkey:
                self.currvalue = _next(self.it) # Exit on StopIteration
                self.currkey = self.keyfunc(self.currvalue)
            self.tgtkey = self.currkey
            return (self.currkey, self._grouper(self.tgtkey))
        def _grouper(self, tgtkey):
            # Yield values as long as they share the current key.
            while self.currkey == tgtkey:
                yield self.currvalue
                self.currvalue = _next(self.it) # Exit on StopIteration
                self.currkey = self.keyfunc(self.currvalue)
# builtin sorted function is only availale since 2.4 version
try:
_sorted = sorted
except NameError:
def _sorted(l, key=None, reverse=False):
# sort function on Python 2.3 does not
# support 'key' parameter
class KeyToCmp(object):
def __init__(self, K):
self.key = K
def __call__(self, x, y):
kx = self.key(x)
ky = self.key(y)
if kx < ky:
return reverse and 1 or -1
elif kx > ky:
return reverse and -1 or 1
else:
return 0
tmp = list(l)
tmp.sort(KeyToCmp(key))
return tmp
try:
    import functools
    _update_wrapper = functools.update_wrapper
except (ImportError, AttributeError):
    # functools.update_wrapper is unavailable (Python < 2.5): minimal
    # fallback copying the metadata attributes a wrapper should expose.
    def _update_wrapper(wrapper, wrapped):
        wrapper.__name__ = wrapped.__name__
        wrapper.__module__ = wrapped.__module__
        wrapper.__doc__ = wrapped.__doc__
        wrapper.__dict__ = wrapped.__dict__
lxybox1/MissionPlanner | Lib/site-packages/numpy/doc/ufuncs.py | 94 | 5362 | """
===================
Universal Functions
===================
Ufuncs are, generally speaking, mathematical functions or operations that are
applied element-by-element to the contents of an array. That is, the result
in each output array element only depends on the value in the corresponding
input array (or arrays) and on no other array elements. Numpy comes with a
large suite of ufuncs, and scipy extends that suite substantially. The simplest
example is the addition operator: ::
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +)
Type coercion
=============
What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
two different types? What is the type of the result? Typically, the result is
the higher of the two types. For example: ::
float32 + float64 -> float64
int8 + int32 -> int32
int16 + float32 -> float32
float32 + complex64 -> complex64
There are some less obvious cases generally involving mixes of types
(e.g. uints, ints and floats) where equal bit sizes for each are not
capable of saving all the information in a different type of equivalent
bit size. Some examples are int32 vs float32 or uint32 vs int32.
Generally, the result is the higher type of larger size than both
(if available). So: ::
int32 + float32 -> float64
uint32 + int32 -> int64
Finally, the type coercion behavior when expressions involve Python
scalars is different than that seen for arrays. Since Python has a
limited number of types, combining a Python int with a dtype=np.int8
array does not coerce to the higher type but instead, the type of the
array prevails. So the rules for Python scalars combined with arrays is
that the result will be that of the array equivalent the Python scalar
if the Python scalar is of a higher 'kind' than the array (e.g., float
vs. int), otherwise the resultant type will be that of the array.
For example: ::
Python int + int8 -> int8
Python float + int8 -> float64
ufunc methods
=============
Binary ufuncs support 4 methods.
**.reduce(arr)** applies the binary operator to elements of the array in
sequence. For example: ::
>>> np.add.reduce(np.arange(10)) # adds all elements of array
45
For multidimensional arrays, the first dimension is reduced by default: ::
>>> np.add.reduce(np.arange(10).reshape(2,5))
array([ 5, 7, 9, 11, 13])
The axis keyword can be used to specify different axes to reduce: ::
>>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
array([10, 35])
**.accumulate(arr)** applies the binary operator and generates an
equivalently shaped array that includes the accumulated amount for each
element of the array. A couple examples: ::
>>> np.add.accumulate(np.arange(10))
array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
>>> np.multiply.accumulate(np.arange(1,9))
array([ 1, 2, 6, 24, 120, 720, 5040, 40320])
The behavior for multidimensional arrays is the same as for .reduce(),
as is the use of the axis keyword).
**.reduceat(arr,indices)** allows one to apply reduce to selected parts
of an array. It is a difficult method to understand. See the
`ufunc.reduceat` entry in the numpy reference documentation for details.
**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
arr2. It will work on multidimensional arrays (the shape of the result is
the concatenation of the two input shapes.: ::
>>> np.multiply.outer(np.arange(3),np.arange(4))
array([[0, 0, 0, 0],
[0, 1, 2, 3],
[0, 2, 4, 6]])
Output arguments
================
All ufuncs accept an optional output array. The array must be of the expected
output shape. Beware that if the type of the output array is of a different
(and lower) type than the output result, the results may be silently truncated
or otherwise corrupted in the downcast to the lower type. This usage is useful
when one wants to avoid creating large temporary arrays and instead allows one
to reuse the same array memory repeatedly (at the expense of not being able to
use more convenient operator notation in expressions). Note that when the
output argument is used, the ufunc still returns a reference to the result.
>>> x = np.arange(2)
>>> np.add(np.arange(2),np.arange(2.),x)
array([0, 2])
>>> x
array([0, 2])
and & or as ufuncs
==================
Invariably people try to use the python 'and' and 'or' as logical operators
(and quite understandably). But these operators do not behave as normal
operators since Python treats these quite differently. They cannot be
overloaded with array equivalents. Thus using 'and' or 'or' with an array
results in an error. There are two alternatives:
1) use the ufunc functions logical_and() and logical_or().
2) use the bitwise operators & and \\|. The drawback of these is that if
the arguments to these operators are not boolean arrays, the result is
likely incorrect. On the other hand, most usages of logical_and and
logical_or are with boolean arrays. As long as one is careful, this is
a convenient way to apply these operators.
"""
| gpl-3.0 |
wilvk/ansible | lib/ansible/module_utils/facts/network/netbsd.py | 232 | 1801 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class NetBSDNetwork(GenericBsdIfconfigNetwork):
    """
    This is the NetBSD Network Class.
    It uses the GenericBsdIfconfigNetwork
    """
    platform = 'NetBSD'

    def parse_media_line(self, words, current_if, ips):
        # Example ifconfig output for an interface:
        #   ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
        #       media: Ethernet 10baseT full-duplex
        #       inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
        # words[0] is the literal 'media:' token, so the payload starts
        # at words[1]; type and comma-separated options may follow.
        payload = words[1:]
        current_if['media'] = payload[0]
        if len(payload) > 1:
            current_if['media_type'] = payload[1]
        if len(payload) > 2:
            current_if['media_options'] = payload[2].split(',')
class NetBSDNetworkCollector(NetworkCollector):
    # Collector wiring: use NetBSDNetwork to gather facts on NetBSD hosts.
    _fact_class = NetBSDNetwork
    _platform = 'NetBSD'
| gpl-3.0 |
giserh/grab | grab/spider/decorators.py | 11 | 1642 | import functools
import logging
from weblib.error import ResponseNotValid
def integrity(integrity_func, integrity_errors=(ResponseNotValid,),
              ignore_errors=()):
    """Decorator factory: validate the response before running a spider
    task handler.

    :param integrity_func: a callable, the name of a spider method, or a
        list/tuple of either; each is called with ``grab`` and should
        raise on a broken response.
    :param integrity_errors: exception types that cause the task to be
        re-scheduled with ``refresh_cache=True`` (handler is skipped).
    :param ignore_errors: exception types that are counted and recorded
        in ``grab.meta['integrity_error']`` but still let the handler run.
    """
    def build_decorator(func):
        @functools.wraps(func)
        def func_wrapper(self, grab, task):
            if isinstance(integrity_func, (list, tuple)):
                int_funcs = integrity_func
            else:
                int_funcs = [integrity_func]
            error = None
            try:
                for int_func in int_funcs:
                    # A string names a method on the spider instance.
                    if isinstance(int_func, str):
                        getattr(self, int_func)(grab)
                    else:
                        int_func(grab)
            except ignore_errors as ex:
                # Ignorable problem: record it, then run the handler anyway.
                # (Checked before integrity_errors, so a type listed in
                # both behaves as ignorable, as before.)
                self.stat.inc(ex.__class__.__name__)
                error = ex
            except integrity_errors as ex:
                # Broken response: re-schedule the task bypassing the
                # cache and skip the handler entirely.  Any other
                # exception type simply propagates.
                yield task.clone(refresh_cache=True)
                self.stat.inc(ex.__class__.__name__)
                return
            grab.meta['integrity_error'] = error
            result = func(self, grab, task)
            if result is not None:
                for event in result:
                    yield event
        return func_wrapper
    return build_decorator
dnmfarrell/zulip | zilencer/management/commands/populate_db.py | 113 | 34944 | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from django.contrib.sites.models import Site
from zerver.models import Message, UserProfile, Stream, Recipient, Client, \
Subscription, Huddle, get_huddle, Realm, UserMessage, \
get_huddle_hash, clear_database, get_client, get_user_profile_by_id, \
split_email_to_domain, email_to_username
from zerver.lib.actions import do_send_message, set_default_streams, \
do_activate_user, do_deactivate_user, do_change_password, do_change_is_admin
from zerver.lib.parallel import run_parallel
from django.db.models import Count
from django.conf import settings
from zerver.lib.bulk_create import bulk_create_realms, \
bulk_create_streams, bulk_create_users, bulk_create_huddles, \
bulk_create_clients
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.models import MAX_MESSAGE_LENGTH
from zerver.models import DefaultStream, get_stream
from zilencer.models import Deployment
import ujson
import datetime
import random
import glob
import os
from optparse import make_option
settings.TORNADO_SERVER = None
def create_users(realms, name_list, bot=False):
    """Bulk-create user (or bot) accounts from (full_name, email) pairs.

    The short name is derived from the username part of each email.
    """
    user_tuples = {
        (email, full_name, email_to_username(email), True)
        for full_name, email in name_list
    }
    bulk_create_users(realms, user_tuples, bot)
def create_streams(realms, realm, stream_list):
    """Bulk-create the named streams inside the given realm."""
    stream_tuples = {(realm.domain, name) for name in stream_list}
    bulk_create_streams(realms, stream_tuples)
class Command(BaseCommand):
    """Management command that fills the database with test realms, users,
    streams, subscriptions and randomly generated messages, optionally
    replaying previously logged messages afterwards."""
    help = "Populate a test database"

    option_list = BaseCommand.option_list + (
        make_option('-n', '--num-messages',
                    dest='num_messages',
                    type='int',
                    default=600,
                    help='The number of messages to create.'),
        make_option('--extra-users',
                    dest='extra_users',
                    type='int',
                    default=0,
                    help='The number of extra users to create'),
        make_option('--huddles',
                    dest='num_huddles',
                    type='int',
                    default=3,
                    help='The number of huddles to create.'),
        make_option('--personals',
                    dest='num_personals',
                    type='int',
                    default=6,
                    help='The number of personal pairs to create.'),
        make_option('--threads',
                    dest='threads',
                    type='int',
                    default=10,
                    help='The number of threads to use.'),
        make_option('--percent-huddles',
                    dest='percent_huddles',
                    type='float',
                    default=15,
                    help='The percent of messages to be huddles.'),
        make_option('--percent-personals',
                    dest='percent_personals',
                    type='float',
                    default=15,
                    help='The percent of messages to be personals.'),
        make_option('--stickyness',
                    dest='stickyness',
                    type='float',
                    default=20,
                    help='The percent of messages to repeat recent folks.'),
        make_option('--nodelete',
                    action="store_false",
                    default=True,
                    dest='delete',
                    help='Whether to delete all the existing messages.'),
        # Bug fix: this help string was a copy-paste of --nodelete's
        # ("Whether to delete all the existing messages."); it now
        # describes what the flag actually gates below.
        make_option('--test-suite',
                    default=False,
                    action="store_true",
                    help='Whether to configure the database for the test suite.'),
        make_option('--replay-old-messages',
                    action="store_true",
                    default=False,
                    dest='replay_old_messages',
                    help='Whether to replace the log of old messages.'),
        )

    def handle(self, **options):
        # Sanity-check the message-type percentages before touching the DB.
        if options["percent_huddles"] + options["percent_personals"] > 100:
            self.stderr.write("Error! More than 100% of messages allocated.\n")
            return
        if options["delete"]:
            # Start by clearing all the data in our database
            clear_database()
            # Create our two default realms
            zulip_realm = Realm.objects.create(domain="zulip.com", name="Zulip Dev")
            if options["test_suite"]:
                Realm.objects.create(domain="mit.edu")
            realms = {}
            for realm in Realm.objects.all():
                realms[realm.domain] = realm
            # Create test Users (UserProfiles are automatically created,
            # as are subscriptions to the ability to receive personals).
            names = [("Othello, the Moor of Venice", "othello@zulip.com"), ("Iago", "iago@zulip.com"),
                     ("Prospero from The Tempest", "prospero@zulip.com"),
                     ("Cordelia Lear", "cordelia@zulip.com"), ("King Hamlet", "hamlet@zulip.com")]
            for i in xrange(options["extra_users"]):
                names.append(('Extra User %d' % (i,), 'extrauser%d@zulip.com' % (i,)))
            create_users(realms, names)
            iago = UserProfile.objects.get(email="iago@zulip.com")
            do_change_is_admin(iago, True)
            # Create public streams.
            stream_list = ["Verona", "Denmark", "Scotland", "Venice", "Rome"]
            create_streams(realms, zulip_realm, stream_list)
            recipient_streams = [Stream.objects.get(name=name, realm=zulip_realm).id for name in stream_list]
            # Create subscriptions to streams: user i is subscribed to a
            # prefix of the stream list proportional to i, so different
            # users have different subscription counts.
            subscriptions_to_add = []
            profiles = UserProfile.objects.select_related().all()
            for i, profile in enumerate(profiles):
                # Subscribe to some streams.
                for type_id in recipient_streams[:int(len(recipient_streams) *
                                                      float(i)/len(profiles)) + 1]:
                    r = Recipient.objects.get(type=Recipient.STREAM, type_id=type_id)
                    s = Subscription(recipient=r, user_profile=profile)
                    subscriptions_to_add.append(s)
            Subscription.objects.bulk_create(subscriptions_to_add)
        else:
            zulip_realm = Realm.objects.get(domain="zulip.com")
            recipient_streams = [klass.type_id for klass in
                                 Recipient.objects.filter(type=Recipient.STREAM)]
        # Extract a list of all users
        user_profiles = [user_profile.id for user_profile in UserProfile.objects.all()]
        # Create several initial huddles
        for i in xrange(options["num_huddles"]):
            get_huddle(random.sample(user_profiles, random.randint(3, 4)))
        # Create several initial pairs for personals
        personals_pairs = [random.sample(user_profiles, 2)
                           for i in xrange(options["num_personals"])]
        threads = options["threads"]
        jobs = []
        for i in xrange(threads):
            # Spread num_messages across the jobs, giving the first
            # (num_messages % threads) jobs one extra.  Use integer floor
            # division explicitly (identical in Python 2, correct under
            # Python 3 as well).
            count = options["num_messages"] // threads
            if i < options["num_messages"] % threads:
                count += 1
            jobs.append((count, personals_pairs, options, self.stdout.write))
        for job in jobs:
            send_messages(job)
        if options["delete"]:
            # Create the "website" and "API" clients; if we don't, the
            # default values in zerver/decorators.py will not work
            # with the Django test suite.
            get_client("website")
            get_client("API")
            if options["test_suite"]:
                # Create test users; the MIT ones are needed to test
                # the Zephyr mirroring codepaths.
                testsuite_mit_users = [
                    ("Fred Sipb (MIT)", "sipbtest@mit.edu"),
                    ("Athena Consulting Exchange User (MIT)", "starnine@mit.edu"),
                    ("Esp Classroom (MIT)", "espuser@mit.edu"),
                    ]
                create_users(realms, testsuite_mit_users)
            # These bots are directly referenced from code and thus
            # are needed for the test suite.
            all_realm_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
                              for bot in settings.INTERNAL_BOTS]
            zulip_realm_bots = [
                ("Zulip New User Bot", "new-user-bot@zulip.com"),
                ("Zulip Error Bot", "error-bot@zulip.com"),
                ]
            zulip_realm_bots.extend(all_realm_bots)
            create_users(realms, zulip_realm_bots, bot=True)
            if not options["test_suite"]:
                # To keep the messages.json fixtures file for the test
                # suite fast, don't add these users and subscriptions
                # when running populate_db for the test suite
                zulip_stream_list = ["devel", "all", "zulip", "design", "support", "social", "test",
                                     "errors", "sales"]
                create_streams(realms, zulip_realm, zulip_stream_list)
                # Add a few default streams
                for stream_name in ["design", "devel", "social", "support"]:
                    DefaultStream.objects.create(realm=zulip_realm, stream=get_stream(stream_name, zulip_realm))
                # Now subscribe everyone to these streams
                subscriptions_to_add = []
                profiles = UserProfile.objects.select_related().filter(realm=zulip_realm)
                for cls in zulip_stream_list:
                    stream = Stream.objects.get(name=cls, realm=zulip_realm)
                    recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
                    for profile in profiles:
                        # Subscribe to some streams.
                        s = Subscription(recipient=recipient, user_profile=profile)
                        subscriptions_to_add.append(s)
                Subscription.objects.bulk_create(subscriptions_to_add)
                # These bots are not needed by the test suite
                internal_zulip_users_nosubs = [
                    ("Zulip Commit Bot", "commit-bot@zulip.com"),
                    ("Zulip Trac Bot", "trac-bot@zulip.com"),
                    ("Zulip Nagios Bot", "nagios-bot@zulip.com"),
                    ("Zulip Feedback Bot", "feedback@zulip.com"),
                    ]
                create_users(realms, internal_zulip_users_nosubs, bot=True)
            # Mark all messages as read
            UserMessage.objects.all().update(flags=UserMessage.flags.read)
            self.stdout.write("Successfully populated test database.\n")
        if options["replay_old_messages"]:
            restore_saved_messages()
# Cache of Recipient objects keyed by id, shared across calls so that
# repeated lookups during message generation avoid a DB query each time.
recipient_hash = {}
def get_recipient_by_id(rid):
    """Return the Recipient with the given id, memoized in recipient_hash."""
    if rid in recipient_hash:
        return recipient_hash[rid]
    # Bug fix: the fetched recipient was never stored back, so the cache
    # stayed permanently empty and every call hit the database.
    recipient = Recipient.objects.get(id=rid)
    recipient_hash[rid] = recipient
    return recipient
def restore_saved_messages():
    """Replay the event logs under settings.EVENT_LOG_DIR into the database.

    First scans every logged event to collect the realms, clients, streams,
    users and huddles the messages will need and bulk-creates them; then
    bulk-inserts the messages themselves, the per-recipient UserMessage
    rows, and the subscription changes; finally resets each user's pointer
    to their latest message.  Python 2 only (print statements).
    """
    old_messages = []
    duplicate_suppression_hash = {}
    stream_dict = {}
    user_set = set()
    email_set = set([u.email for u in UserProfile.objects.all()])
    realm_set = set()
    # Initial client_set is nonempty temporarily because we don't have
    # clients in logs at all right now -- later we can start with nothing.
    client_set = set(["populate_db", "website", "zephyr_mirror"])
    huddle_user_set = set()
    # First, determine all the objects our messages will need.
    print datetime.datetime.now(), "Creating realms/streams/etc..."
    def process_line(line):
        # Parse one logged event (a JSON line), normalize its emails and
        # domains, and record which realms/users/streams/clients/huddles
        # it references in the enclosing accumulator sets.
        old_message_json = line.strip()
        # Due to populate_db's shakespeare mode, we have a lot of
        # duplicate messages in our log that only differ in their
        # logged ID numbers (same timestamp, content, etc.). With
        # sqlite, bulk creating those messages won't work properly: in
        # particular, the first 100 messages will actually only result
        # in 20 rows ending up in the target table, which screws up
        # the below accounting where for handling changing
        # subscriptions, we assume that the Nth row populate_db
        # created goes with the Nth non-subscription row of the input
        # So suppress the duplicates when using sqlite.
        if "sqlite" in settings.DATABASES["default"]["ENGINE"]:
            tmp_message = ujson.loads(old_message_json)
            tmp_message['id'] = '1'
            duplicate_suppression_key = ujson.dumps(tmp_message)
            if duplicate_suppression_key in duplicate_suppression_hash:
                return
            duplicate_suppression_hash[duplicate_suppression_key] = True
        old_message = ujson.loads(old_message_json)
        message_type = old_message["type"]
        # Lower case emails and domains; it will screw up
        # deduplication if we don't
        def fix_email(email):
            return email.strip().lower()
        if message_type in ["stream", "huddle", "personal"]:
            old_message["sender_email"] = fix_email(old_message["sender_email"])
            # Fix the length on too-long messages before we start processing them
            if len(old_message["content"]) > MAX_MESSAGE_LENGTH:
                old_message["content"] = "[ This message was deleted because it was too long ]"
        if message_type in ["subscription_added", "subscription_removed"]:
            old_message["domain"] = old_message["domain"].lower()
            old_message["user"] = fix_email(old_message["user"])
        elif message_type == "subscription_property":
            old_message["user"] = fix_email(old_message["user"])
        elif message_type == "user_email_changed":
            old_message["old_email"] = fix_email(old_message["old_email"])
            old_message["new_email"] = fix_email(old_message["new_email"])
        elif message_type.startswith("user_"):
            old_message["user"] = fix_email(old_message["user"])
        elif message_type.startswith("enable_"):
            old_message["user"] = fix_email(old_message["user"])
        if message_type == 'personal':
            old_message["recipient"][0]["email"] = fix_email(old_message["recipient"][0]["email"])
        elif message_type == "huddle":
            for i in xrange(len(old_message["recipient"])):
                old_message["recipient"][i]["email"] = fix_email(old_message["recipient"][i]["email"])
        old_messages.append(old_message)
        if message_type in ["subscription_added", "subscription_removed"]:
            stream_name = old_message["name"].strip()
            canon_stream_name = stream_name.lower()
            # NOTE(review): stream_dict is keyed by (domain, name) tuples,
            # but this membership test checks the bare name string, so it
            # is always True and the entry is unconditionally (re)written.
            # Harmless today (the value is deterministic) but the guard is
            # ineffective -- confirm intent before relying on it.
            if canon_stream_name not in stream_dict:
                stream_dict[(old_message["domain"], canon_stream_name)] = \
                    (old_message["domain"], stream_name)
        elif message_type == "user_created":
            user_set.add((old_message["user"], old_message["full_name"], old_message["short_name"], False))
        elif message_type == "realm_created":
            realm_set.add(old_message["domain"])
        if message_type not in ["stream", "huddle", "personal"]:
            return
        sender_email = old_message["sender_email"]
        domain = split_email_to_domain(sender_email)
        realm_set.add(domain)
        if old_message["sender_email"] not in email_set:
            user_set.add((old_message["sender_email"],
                          old_message["sender_full_name"],
                          old_message["sender_short_name"],
                          False))
        if 'sending_client' in old_message:
            client_set.add(old_message['sending_client'])
        if message_type == 'stream':
            stream_name = old_message["recipient"].strip()
            canon_stream_name = stream_name.lower()
            # NOTE(review): same ineffective tuple-vs-string membership
            # check as above.
            if canon_stream_name not in stream_dict:
                stream_dict[(domain, canon_stream_name)] = (domain, stream_name)
        elif message_type == 'personal':
            u = old_message["recipient"][0]
            if u["email"] not in email_set:
                user_set.add((u["email"], u["full_name"], u["short_name"], False))
                email_set.add(u["email"])
        elif message_type == 'huddle':
            for u in old_message["recipient"]:
                user_set.add((u["email"], u["full_name"], u["short_name"], False))
                if u["email"] not in email_set:
                    user_set.add((u["email"], u["full_name"], u["short_name"], False))
                    email_set.add(u["email"])
            huddle_user_set.add(tuple(sorted(set(u["email"] for u in old_message["recipient"]))))
        else:
            raise ValueError('Bad message type')
    event_glob = os.path.join(settings.EVENT_LOG_DIR, 'events.*')
    for filename in sorted(glob.glob(event_glob)):
        with file(filename, "r") as message_log:
            for line in message_log.readlines():
                process_line(line)
    stream_recipients = {}
    user_recipients = {}
    huddle_recipients = {}
    # Then, create the objects our messages need.
    print datetime.datetime.now(), "Creating realms..."
    bulk_create_realms(realm_set)
    realms = {}
    for realm in Realm.objects.all():
        realms[realm.domain] = realm
    print datetime.datetime.now(), "Creating clients..."
    bulk_create_clients(client_set)
    clients = {}
    for client in Client.objects.all():
        clients[client.name] = client
    print datetime.datetime.now(), "Creating streams..."
    bulk_create_streams(realms, stream_dict.values())
    streams = {}
    for stream in Stream.objects.all():
        streams[stream.id] = stream
    for recipient in Recipient.objects.filter(type=Recipient.STREAM):
        stream_recipients[(streams[recipient.type_id].realm_id,
                           streams[recipient.type_id].name.lower())] = recipient
    print datetime.datetime.now(), "Creating users..."
    bulk_create_users(realms, user_set)
    users = {}
    users_by_id = {}
    for user_profile in UserProfile.objects.select_related().all():
        users[user_profile.email] = user_profile
        users_by_id[user_profile.id] = user_profile
    for recipient in Recipient.objects.filter(type=Recipient.PERSONAL):
        user_recipients[users_by_id[recipient.type_id].email] = recipient
    print datetime.datetime.now(), "Creating huddles..."
    bulk_create_huddles(users, huddle_user_set)
    huddles_by_id = {}
    for huddle in Huddle.objects.all():
        huddles_by_id[huddle.id] = huddle
    for recipient in Recipient.objects.filter(type=Recipient.HUDDLE):
        huddle_recipients[huddles_by_id[recipient.type_id].huddle_hash] = recipient
    # TODO: Add a special entry type in the log that is a subscription
    # change and import those as we go to make subscription changes
    # take effect!
    print datetime.datetime.now(), "Importing subscriptions..."
    subscribers = {}
    for s in Subscription.objects.select_related().all():
        if s.active:
            subscribers.setdefault(s.recipient.id, set()).add(s.user_profile.id)
    # Then create all the messages, without talking to the DB!
    print datetime.datetime.now(), "Importing messages, part 1..."
    first_message_id = None
    if Message.objects.exists():
        first_message_id = Message.objects.all().order_by("-id")[0].id + 1
    messages_to_create = []
    for idx, old_message in enumerate(old_messages):
        message_type = old_message["type"]
        if message_type not in ["stream", "huddle", "personal"]:
            continue
        message = Message()
        sender_email = old_message["sender_email"]
        domain = split_email_to_domain(sender_email)
        realm = realms[domain]
        message.sender = users[sender_email]
        type_hash = {"stream": Recipient.STREAM,
                     "huddle": Recipient.HUDDLE,
                     "personal": Recipient.PERSONAL}
        # Attribute each message to a sending client: prefer the logged
        # client, then fall back by sender/realm heuristics.
        if 'sending_client' in old_message:
            message.sending_client = clients[old_message['sending_client']]
        elif sender_email in ["othello@zulip.com", "iago@zulip.com", "prospero@zulip.com",
                             "cordelia@zulip.com", "hamlet@zulip.com"]:
            message.sending_client = clients['populate_db']
        elif realm.domain == "zulip.com":
            message.sending_client = clients["website"]
        elif realm.domain == "mit.edu":
            message.sending_client = clients['zephyr_mirror']
        else:
            message.sending_client = clients['populate_db']
        message.type = type_hash[message_type]
        message.content = old_message["content"]
        message.subject = old_message["subject"]
        message.pub_date = timestamp_to_datetime(old_message["timestamp"])
        if message.type == Recipient.PERSONAL:
            message.recipient = user_recipients[old_message["recipient"][0]["email"]]
        elif message.type == Recipient.STREAM:
            message.recipient = stream_recipients[(realm.id,
                                                   old_message["recipient"].lower())]
        elif message.type == Recipient.HUDDLE:
            huddle_hash = get_huddle_hash([users[u["email"]].id
                                           for u in old_message["recipient"]])
            message.recipient = huddle_recipients[huddle_hash]
        else:
            raise ValueError('Bad message type')
        messages_to_create.append(message)
    print datetime.datetime.now(), "Importing messages, part 2..."
    Message.objects.bulk_create(messages_to_create)
    messages_to_create = []
    # Finally, create all the UserMessage objects
    print datetime.datetime.now(), "Importing usermessages, part 1..."
    personal_recipients = {}
    for r in Recipient.objects.filter(type = Recipient.PERSONAL):
        personal_recipients[r.id] = True
    all_messages = Message.objects.all()
    user_messages_to_create = []
    messages_by_id = {}
    for message in all_messages:
        messages_by_id[message.id] = message
    if len(messages_by_id) == 0:
        print datetime.datetime.now(), "No old messages to replay"
        return
    if first_message_id is None:
        first_message_id = min(messages_by_id.keys())
    tot_user_messages = 0
    pending_subs = {}
    current_message_id = first_message_id
    pending_colors = {}
    # Walk the log a second time, in order: non-message events mutate the
    # in-memory subscription state, and each message event consumes the
    # next bulk-created Message row (the Nth message event must match the
    # Nth created row -- see the sqlite dedup comment above).
    for old_message in old_messages:
        message_type = old_message["type"]
        if message_type == 'subscription_added':
            stream_key = (realms[old_message["domain"]].id, old_message["name"].strip().lower())
            subscribers.setdefault(stream_recipients[stream_key].id,
                                   set()).add(users[old_message["user"]].id)
            pending_subs[(stream_recipients[stream_key].id,
                          users[old_message["user"]].id)] = True
            continue
        elif message_type == "subscription_removed":
            stream_key = (realms[old_message["domain"]].id, old_message["name"].strip().lower())
            user_id = users[old_message["user"]].id
            subscribers.setdefault(stream_recipients[stream_key].id, set())
            try:
                subscribers[stream_recipients[stream_key].id].remove(user_id)
            except KeyError:
                print "Error unsubscribing %s from %s: not subscribed" % (
                    old_message["user"], old_message["name"])
            pending_subs[(stream_recipients[stream_key].id,
                          users[old_message["user"]].id)] = False
            continue
        elif message_type == "user_activated" or message_type == "user_created":
            # These are rare, so just handle them the slow way
            user_profile = users[old_message["user"]]
            join_date = timestamp_to_datetime(old_message['timestamp'])
            do_activate_user(user_profile, log=False, join_date=join_date)
            # Update the cache of users to show this user as activated
            users_by_id[user_profile.id] = user_profile
            users[old_message["user"]] = user_profile
            continue
        elif message_type == "user_deactivated":
            user_profile = users[old_message["user"]]
            do_deactivate_user(user_profile, log=False)
            continue
        elif message_type == "user_change_password":
            # Just handle these the slow way
            user_profile = users[old_message["user"]]
            do_change_password(user_profile, old_message["pwhash"], log=False,
                               hashed_password=True)
            continue
        elif message_type == "user_change_full_name":
            # Just handle these the slow way
            user_profile = users[old_message["user"]]
            user_profile.full_name = old_message["full_name"]
            user_profile.save(update_fields=["full_name"])
            continue
        elif message_type == "enable_desktop_notifications_changed":
            # Just handle these the slow way
            user_profile = users[old_message["user"]]
            user_profile.enable_desktop_notifications = (old_message["enable_desktop_notifications"] != "false")
            user_profile.save(update_fields=["enable_desktop_notifications"])
            continue
        elif message_type == "enable_sounds_changed":
            # NOTE(review): unlike its sibling branches this one has no
            # `continue`, so control falls through to the message-handling
            # code below and raises "Unexpected message type" -- confirm
            # whether the missing `continue` is intentional.
            user_profile = users[old_message["user"]]
            user_profile.enable_sounds = (old_message["enable_sounds"] != "false")
            user_profile.save(update_fields=["enable_sounds"])
        elif message_type == "enable_offline_email_notifications_changed":
            user_profile = users[old_message["user"]]
            user_profile.enable_offline_email_notifications = (old_message["enable_offline_email_notifications"] != "false")
            user_profile.save(update_fields=["enable_offline_email_notifications"])
            continue
        elif message_type == "enable_offline_push_notifications_changed":
            user_profile = users[old_message["user"]]
            user_profile.enable_offline_push_notifications = (old_message["enable_offline_push_notifications"] != "false")
            user_profile.save(update_fields=["enable_offline_push_notifications"])
            continue
        elif message_type == "default_streams":
            set_default_streams(Realm.objects.get(domain=old_message["domain"]),
                                old_message["streams"])
            continue
        elif message_type == "subscription_property":
            property_name = old_message.get("property")
            if property_name == "stream_color" or property_name == "color":
                color = old_message.get("color", old_message.get("value"))
                pending_colors[(old_message["user"],
                                old_message["stream_name"].lower())] = color
            elif property_name in ["in_home_view", "notifications"]:
                # TODO: Handle this
                continue
            else:
                raise RuntimeError("Unknown property %s" % (property_name,))
            continue
        elif message_type == "realm_created":
            # No action required
            continue
        elif message_type in ["user_email_changed", "update_onboarding", "update_message"]:
            # TODO: Handle these
            continue
        if message_type not in ["stream", "huddle", "personal"]:
            raise RuntimeError("Unexpected message type %s" % (message_type,))
        # This event is a message: pair it with the next created row.
        message = messages_by_id[current_message_id]
        current_message_id += 1
        if message.recipient_id not in subscribers:
            # Nobody received this message -- probably due to our
            # subscriptions being out-of-date.
            continue
        recipient_user_ids = set()
        for user_profile_id in subscribers[message.recipient_id]:
            recipient_user_ids.add(user_profile_id)
        if message.recipient_id in personal_recipients:
            # Include the sender in personal (1:1) message recipients
            recipient_user_ids.add(message.sender_id)
        for user_profile_id in recipient_user_ids:
            if users_by_id[user_profile_id].is_active:
                um = UserMessage(user_profile_id=user_profile_id,
                                 message=message)
                user_messages_to_create.append(um)
        # Flush in batches to bound memory usage.
        if len(user_messages_to_create) > 100000:
            tot_user_messages += len(user_messages_to_create)
            UserMessage.objects.bulk_create(user_messages_to_create)
            user_messages_to_create = []
    print datetime.datetime.now(), "Importing usermessages, part 2..."
    tot_user_messages += len(user_messages_to_create)
    UserMessage.objects.bulk_create(user_messages_to_create)
    print datetime.datetime.now(), "Finalizing subscriptions..."
    current_subs = {}
    current_subs_obj = {}
    for s in Subscription.objects.select_related().all():
        current_subs[(s.recipient_id, s.user_profile_id)] = s.active
        current_subs_obj[(s.recipient_id, s.user_profile_id)] = s
    subscriptions_to_add = []
    subscriptions_to_change = []
    for pending_sub in pending_subs.keys():
        (recipient_id, user_profile_id) = pending_sub
        current_state = current_subs.get(pending_sub)
        if pending_subs[pending_sub] == current_state:
            # Already correct in the database
            continue
        elif current_state is not None:
            subscriptions_to_change.append((pending_sub, pending_subs[pending_sub]))
            continue
        s = Subscription(recipient_id=recipient_id,
                         user_profile_id=user_profile_id,
                         active=pending_subs[pending_sub])
        subscriptions_to_add.append(s)
    Subscription.objects.bulk_create(subscriptions_to_add)
    for (sub, active) in subscriptions_to_change:
        current_subs_obj[sub].active = active
        current_subs_obj[sub].save(update_fields=["active"])
    # NOTE(review): `subs` is built but never read again in this function.
    subs = {}
    for sub in Subscription.objects.all():
        subs[(sub.user_profile_id, sub.recipient_id)] = sub
    # TODO: do restore of subscription colors -- we're currently not
    # logging changes so there's little point in having the code :(
    print datetime.datetime.now(), "Finished importing %s messages (%s usermessages)" % \
        (len(all_messages), tot_user_messages)
    site = Site.objects.get_current()
    site.domain = 'zulip.com'
    site.save()
    print datetime.datetime.now(), "Filling in user pointers..."
    # Set restored pointers to the very latest messages
    for user_profile in UserProfile.objects.all():
        try:
            top = UserMessage.objects.filter(
                user_profile_id=user_profile.id).order_by("-message")[0]
            user_profile.pointer = top.message_id
        except IndexError:
            # User has no messages at all.
            user_profile.pointer = -1
        user_profile.save(update_fields=["pointer"])
    print datetime.datetime.now(), "Done replaying old messages"
# Create some test messages, including:
# - multiple streams
# - multiple subjects per stream
# - multiple huddles
# - multiple personals conversations
# - multiple messages per subject
# - both single and multi-line content
def send_messages(data):
    """Generate and send a batch of random test messages.

    data is a tuple (tot_messages, personals_pairs, options, output):
    the number of messages to send, the list of user-id pairs used for
    personal messages, the parsed command-line options, and an output
    writer (currently unused).  Returns tot_messages.
    """
    (tot_messages, personals_pairs, options, output) = data
    # Seed per process so parallel workers don't all emit the same
    # pseudo-random sequence.
    random.seed(os.getpid())
    # Bug fix: the file handle was previously leaked
    # (file(...).readlines() with no close); use a context manager.
    with open("zilencer/management/commands/test_messages.txt", "r") as f:
        texts = f.readlines()
    # Bug fix: randint is inclusive at both ends, so len(texts) was a
    # possible offset, which made the first message's content empty.
    offset = random.randint(0, len(texts) - 1)
    recipient_streams = [klass.id for klass in
                         Recipient.objects.filter(type=Recipient.STREAM)]
    recipient_huddles = [h.id for h in Recipient.objects.filter(type=Recipient.HUDDLE)]
    huddle_members = {}
    for h in recipient_huddles:
        huddle_members[h] = [s.user_profile.id for s in
                             Subscription.objects.filter(recipient_id=h)]
    num_messages = 0
    random_max = 1000000
    recipients = {}
    while num_messages < tot_messages:
        saved_data = ''
        message = Message()
        message.sending_client = get_client('populate_db')
        # Each message body is 1-5 consecutive lines from the fixture
        # text, wrapping around at the end of the file.
        length = random.randint(1, 5)
        lines = (t.strip() for t in texts[offset: offset + length])
        message.content = '\n'.join(lines)
        offset += length
        offset = offset % len(texts)
        randkey = random.randint(1, random_max)
        if (num_messages > 0 and
            random.randint(1, random_max) * 100. / random_max < options["stickyness"]):
            # Use an old recipient ("sticky" conversation)
            message_type, recipient_id, saved_data = recipients[num_messages - 1]
            if message_type == Recipient.PERSONAL:
                personals_pair = saved_data
                random.shuffle(personals_pair)
            elif message_type == Recipient.STREAM:
                message.subject = saved_data
                message.recipient = get_recipient_by_id(recipient_id)
            elif message_type == Recipient.HUDDLE:
                message.recipient = get_recipient_by_id(recipient_id)
        elif (randkey <= random_max * options["percent_huddles"] / 100.):
            message_type = Recipient.HUDDLE
            message.recipient = get_recipient_by_id(random.choice(recipient_huddles))
        elif (randkey <= random_max * (options["percent_huddles"] + options["percent_personals"]) / 100.):
            message_type = Recipient.PERSONAL
            personals_pair = random.choice(personals_pairs)
            random.shuffle(personals_pair)
        elif (randkey <= random_max * 1.0):
            # Remaining probability mass: stream message.
            message_type = Recipient.STREAM
            message.recipient = get_recipient_by_id(random.choice(recipient_streams))
        # Pick a sender appropriate for the chosen recipient type.
        if message_type == Recipient.HUDDLE:
            sender_id = random.choice(huddle_members[message.recipient.id])
            message.sender = get_user_profile_by_id(sender_id)
        elif message_type == Recipient.PERSONAL:
            message.recipient = Recipient.objects.get(type=Recipient.PERSONAL,
                                                      type_id=personals_pair[0])
            message.sender = get_user_profile_by_id(personals_pair[1])
            saved_data = personals_pair
        elif message_type == Recipient.STREAM:
            stream = Stream.objects.get(id=message.recipient.type_id)
            # Pick a random subscriber to the stream
            message.sender = random.choice(Subscription.objects.filter(
                recipient=message.recipient)).user_profile
            message.subject = stream.name + str(random.randint(1, 3))
            saved_data = message.subject
        message.pub_date = now()
        do_send_message(message)
        # Remember this recipient so the stickyness branch above can
        # continue the same conversation on a later iteration.
        recipients[num_messages] = [message_type, message.recipient.id, saved_data]
        num_messages += 1
    return tot_messages
| apache-2.0 |
yati-sagade/incubator-airflow | tests/executors/test_local_executor.py | 11 | 2469 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from airflow.executors.local_executor import LocalExecutor
from airflow.utils.state import State
from airflow.utils.timeout import timeout
class LocalExecutorTest(unittest.TestCase):
    """Exercise LocalExecutor in unlimited- and limited-parallelism modes,
    checking recorded task states and worker accounting."""
    TEST_SUCCESS_COMMANDS = 5

    def execution_parallelism(self, parallelism=0):
        """Run several succeeding commands plus one failing command and
        verify the states the executor records for them."""
        executor = LocalExecutor(parallelism=parallelism)
        executor.start()
        success_key = 'success {}'
        success_command = 'echo {}'
        fail_command = 'exit 1'
        for i in range(self.TEST_SUCCESS_COMMANDS):
            key, command = success_key.format(i), success_command.format(i)
            executor.execute_async(key=key, command=command)
            executor.running[key] = True
        # errors are propagated for some reason; narrowed from a bare
        # `except:` so KeyboardInterrupt/SystemExit are not swallowed.
        try:
            executor.execute_async(key='fail', command=fail_command)
        except Exception:
            pass
        executor.running['fail'] = True
        if parallelism == 0:
            # Unlimited parallelism must not hang on shutdown.
            with timeout(seconds=5):
                executor.end()
        else:
            executor.end()
        for i in range(self.TEST_SUCCESS_COMMANDS):
            key = success_key.format(i)
            # Bug fix: assertTrue(x, y) treats y as the failure *message*
            # and only checks truthiness of x; assertEqual actually
            # compares the recorded state against the expected one.
            self.assertEqual(executor.event_buffer[key], State.SUCCESS)
        self.assertEqual(executor.event_buffer['fail'], State.FAILED)
        for i in range(self.TEST_SUCCESS_COMMANDS):
            self.assertNotIn(success_key.format(i), executor.running)
        self.assertNotIn('fail', executor.running)
        # Unlimited parallelism spawns one worker per task; otherwise the
        # pool is capped at `parallelism` workers.
        expected = self.TEST_SUCCESS_COMMANDS + 1 if parallelism == 0 else parallelism
        self.assertEqual(executor.workers_used, expected)

    def test_execution_unlimited_parallelism(self):
        self.execution_parallelism(parallelism=0)

    def test_execution_limited_parallelism(self):
        test_parallelism = 2
        self.execution_parallelism(parallelism=test_parallelism)
if __name__ == '__main__':
    # Allow running this test file directly with `python`.
    unittest.main()
| apache-2.0 |
dturner-tw/pants | contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/missing_contextmanager.py | 18 | 1167 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
# TODO(wickman)
#
# 1. open(foo) should always be done in a with context.
#
# 2. if you see acquire/release on the same variable in a particular ast
# body, warn about context manager use.
class MissingContextManager(CheckstylePlugin):
  """Recommend the use of contextmanagers when it seems appropriate."""

  def nits(self):
    # Collect the call expressions that already appear as `with <call>:`
    # so those open() calls are not flagged below.
    managed_calls = set()
    for with_node in self.iter_ast_types(ast.With):
      if isinstance(with_node.context_expr, ast.Call):
        managed_calls.add(with_node.context_expr)
    # Flag every bare open(...) call that is not one of the managed ones.
    for call_node in self.iter_ast_types(ast.Call):
      func = call_node.func
      is_open_call = isinstance(func, ast.Name) and func.id == 'open'
      if is_open_call and call_node not in managed_calls:
        yield self.warning('T802', 'open() calls should be made within a contextmanager.', call_node)
| apache-2.0 |
MegaMark16/django-puzzle-captcha | puzzle_captcha_test/settings.py | 1 | 3435 | # Django settings for puzzle_captcha project.
import os
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
PROJECT_NAME = PROJECT_DIR.split("/")[-1]
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_DIR, 'puzzle_captcha.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://media.apprabbit.com/puzzlecaptcha/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = 'http://media.apprabbit.com/adminmedia/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ehzc8cm-2+ffyqv8vc&vebzj*mm@=gxmi4a^o($3llwatzhndn'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'puzzle_captcha_test.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'sorl.thumbnail',
'puzzle_captcha',
'test_app',
)
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 34 | 10313 | import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.utils.testing import (
assert_equal, assert_almost_equal, assert_raise_message,
)
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
    # Every clustering metric must reject mismatched sizes and non-1D label
    # arrays with the same, informative ValueError messages.
    for score_func in score_funcs:
        expected = ('labels_true and labels_pred must have same size,'
                    ' got 2 and 3')
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1], [1, 1, 1])
        expected = "labels_true must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [[0, 1], [1, 0]], [1, 1, 1])
        expected = "labels_pred must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
    # Identical clusterings must score exactly 1.0 for every metric,
    # regardless of label names or numeric dtype.
    perfect_cases = [
        ([], []),
        ([0], [1]),
        ([0, 0, 0], [0, 0, 0]),
        ([0, 1, 0], [42, 7, 42]),
        ([0., 1., 0.], [42., 7., 42.]),
        ([0., 1., 2.], [42., 7., 2.]),
        ([0, 1, 2], [42, 7, 2]),
    ]
    for score_func in score_funcs:
        for labels_true, labels_pred in perfect_cases:
            assert_equal(score_func(labels_true, labels_pred), 1.0)
def test_homogeneous_but_not_complete_labeling():
    # homogeneous but not complete clustering:
    # the second true class is split over two predicted clusters, so
    # completeness drops below 1 while homogeneity stays perfect.
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    assert_almost_equal(h, 1.00, 2)
    assert_almost_equal(c, 0.69, 2)
    assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    # complete but not homogeneous clustering:
    # two true classes are merged into one predicted cluster, so
    # homogeneity drops below 1 while completeness stays perfect.
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    assert_almost_equal(h, 0.58, 2)
    assert_almost_equal(c, 1.00, 2)
    assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    # neither complete nor homogeneous but not so bad either
    # (v-measure is the harmonic mean of the two, hence 0.52)
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():  # (sic: "consecutive"; renaming would change the collected test id)
    # regression tests for labels with gaps:
    # scores must be identical whether label values are contiguous
    # (0, 1, 2) or have holes (0, 2 / 0, 4, 2).
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 2, 2, 2],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    # Same invariance for ARI.
    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    """Score random uniform cluster labelings against each other.

    Returns an array of shape (len(k_range), n_runs) where row i holds the
    scores of n_runs pairs of random labelings drawn from k_range[i] labels.
    """
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(k_range), n_runs))
    for row, n_clusters in enumerate(k_range):
        for col in range(n_runs):
            labels_a = rng.randint(low=0, high=n_clusters, size=n_samples)
            labels_b = rng.randint(low=0, high=n_clusters, size=n_samples)
            scores[row, col] = score_func(labels_a, labels_b)
    return scores
def test_adjustment_for_chance():
    # Check that adjusted scores are almost zero on random labels
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs)
    # Worst case over the runs must stay within a few percent of zero.
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided sparse contingency
    C = contingency_matrix(labels_a, labels_b, sparse=True)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided dense contingency
    C = contingency_matrix(labels_a, labels_b)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information
    n_samples = C.sum()
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27502, 5)
    # AMI of identical clusterings is 1.0 regardless of label names.
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert_equal(ami, 1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    # This is not accurate to more than 2 places
    assert_almost_equal(ami, 0.37, 2)
def test_expected_mutual_info_overflow():
    # Test for regression where contingency cell exceeds 2**16
    # leading to overflow in np.outer, resulting in EMI > 1
    # (EMI must never exceed 1 by definition).
    assert expected_mutual_information(np.array([[70000]]), 70000) <= 1
def test_entropy():
    # [0, 0, 42.] has class counts (2, 1); natural-log entropy of the
    # (2/3, 1/3) distribution is ~0.6365141.
    ent = entropy([0, 0, 42.])
    assert_almost_equal(ent, 0.6365141, 5)
    # Empty label sets are pinned at entropy 1 here.
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    # The contingency matrix is equivalent to a 2D histogram over label pairs.
    C2 = np.histogram2d(labels_a, labels_b,
                        bins=(np.arange(1, 5),
                              np.arange(1, 5)))[0]
    assert_array_almost_equal(C, C2)
    # eps is added uniformly to every cell.
    C = contingency_matrix(labels_a, labels_b, eps=.1)
    assert_array_almost_equal(C, C2 + .1)
def test_contingency_matrix_sparse():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Sparse and dense contingency matrices must agree.
    C = contingency_matrix(labels_a, labels_b)
    C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
    assert_array_almost_equal(C, C_sparse)
    # eps cannot be combined with a sparse result and must be rejected.
    # (Fix: the return value of assert_raise_message was previously assigned
    # back to C_sparse, which was dead and misleading -- nothing used it.)
    assert_raise_message(ValueError,
                         "Cannot set 'eps' when sparse=True",
                         contingency_matrix, labels_a, labels_b,
                         eps=1e-10, sparse=True)
def test_exactly_zero_info_score():
    # Check numerical stability when information is exactly zero
    # (one labeling is constant, the other assigns every sample its own label).
    # Fix: np.int was a deprecated alias of the builtin int (removed in
    # NumPy 1.24); use the builtin directly.
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = (np.ones(i, dtype=int),
                              np.arange(i, dtype=int))
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        # (A second, identical NMI assertion used to follow here; it was
        # redundant and has been dropped.)
def test_v_measure_and_mutual_information(seed=36):
    # Check relation between v_measure, entropy and mutual information:
    # v = 2 * MI / (H(a) + H(b)).
    # Fix: np.int was a deprecated alias of the builtin int (removed in
    # NumPy 1.24); use the builtin directly.
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = (random_state.randint(0, 10, i),
                              random_state.randint(0, 10, i))
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
def test_fowlkes_mallows_score():
    # General case: FMI = TP / sqrt((TP + FP) * (TP + FN)) = 4 / sqrt(12 * 6).
    score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
                                  [0, 0, 1, 1, 2, 2])
    assert_almost_equal(score, 4. / np.sqrt(12. * 6.))
    # Perfect match but where the label names changed
    perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
                                          [1, 1, 1, 0, 0, 0])
    assert_almost_equal(perfect_score, 1.)
    # Worst case: one labeling is constant, the other all-singletons.
    worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0],
                                        [0, 1, 2, 3, 4, 5])
    assert_almost_equal(worst_score, 0.)
def test_fowlkes_mallows_score_properties():
    # handcrafted example
    labels_a = np.array([0, 0, 0, 1, 1, 2])
    labels_b = np.array([1, 1, 2, 2, 0, 0])
    expected = 1. / np.sqrt((1. + 3.) * (1. + 2.))
    # FMI = TP / sqrt((TP + FP) * (TP + FN))
    score_original = fowlkes_mallows_score(labels_a, labels_b)
    assert_almost_equal(score_original, expected)
    # symmetric property: swapping the two labelings leaves the score unchanged
    score_symetric = fowlkes_mallows_score(labels_b, labels_a)
    assert_almost_equal(score_symetric, expected)
    # permutation property: renaming the labels leaves the score unchanged
    score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
    assert_almost_equal(score_permuted, expected)
    # symmetric and permutation (both together)
    score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
    assert_almost_equal(score_both, expected)
| bsd-3-clause |
zackslash/scrapy | tests/test_utils_spider.py | 147 | 1056 | import unittest
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils.spider import iterate_spider_output, iter_spider_classes
from scrapy.spiders import CrawlSpider
class MyBaseSpider(CrawlSpider):
    # No `name` attribute: iter_spider_classes() must skip this one
    # (see test_iter_spider_classes below).
    pass  # abstract spider
class MySpider1(MyBaseSpider):
    name = 'myspider1'
class MySpider2(MyBaseSpider):
    name = 'myspider2'
class UtilsSpidersTestCase(unittest.TestCase):
    """Tests for scrapy.utils.spider helpers."""

    def test_iterate_spider_output(self):
        item = BaseItem()
        request = Request('http://scrapytest.org')
        plain_obj = object()
        # Single values come back wrapped in a one-element list...
        for value in (item, request, plain_obj):
            self.assertEqual(list(iterate_spider_output(value)), [value])
        # ...while iterables are passed through unchanged.
        self.assertEqual(list(iterate_spider_output([request, item, plain_obj])),
                         [request, item, plain_obj])

    def test_iter_spider_classes(self):
        import tests.test_utils_spider
        spider_classes = iter_spider_classes(tests.test_utils_spider)
        # Only concrete (named) spiders are yielded; the abstract base is not.
        self.assertEqual(set(spider_classes), {MySpider1, MySpider2})
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
KirillShaman/escalate_gspread | app/gspreadsheet.py | 1 | 2089 | # see: http://www.indjango.com/access-google-sheets-in-python-using-gspread/
# This will issue 100 HTTP requests to Google API:
# for row in range(1, 101):
# worksheet.update_cell(1, row, 'Bingo!')
# While this is only two HTTP requests:
# cell_list = worksheet.range('A1:A100')
# for cell in cell_list:
# cell.value = 'Bingo!'
# worksheet.update_cells(cell_list)
# Spreadsheets API is not very fast so good advice would be to reduce underlying HTTP requests as much as you can.
# I've just added a new method for Worksheet: export().
# It will allow you to request your CSV data right from a Google API server bypassing gspread's data processing.
# It should be faster, so please try it and share your results.
# I haven't released the update on PyPI yet, so you have to check out the master branch from GitHub for the new code.
# That way, your code snippet for downloading a worksheet may look like this:
# def fetch_sheet(sheet, id):
# spreadsheet = client.open_by_key(id)
# worksheet = spreadsheet.worksheets()[0]
# csv_data = worksheet.export(format='csv').read()
# with open('sheets/%s.csv' % sheet, 'w') as f:
# f.write(csv_data)
# You can omit format argument in export call as it defaults to 'csv'. 'pdf' and 'tsv' values work as well.
import gspread
class Gspreadsheet():
    """Small convenience wrapper around a gspread session.

    Holds the credentials and spreadsheet URL, and provides a couple of
    worksheet read helpers.
    """

    def __init__(self, user, password, url):
        self.user = user
        self.password = password
        self.url = url
        self.gclient = None  # set by login()

    def login(self):
        """Authenticate against Google; return the client, or None on failure."""
        try:
            self.gclient = gspread.login(self.user, self.password)
        except Exception as exc:
            # Best-effort: report the problem and leave the client unset so
            # callers can detect the failure via the None return.
            print("Error:")
            print(exc)
            self.gclient = None
        return self.gclient

    def get_row(self, wks, row):
        """Return every cell value of one worksheet row."""
        return wks.row_values(row)

    def col_one(self, wks):
        """Return the column-A values below the header, up to the first empty cell.

        note: ".row_count" seems to always equal 1,000 rows, so be sure to
        break on the first empty row during processing.
        """
        values = []
        for cell in wks.range("A2:A%s" % wks.row_count):
            if len(cell.value) == 0:
                break
            values.append(cell.value)
        return values
| mit |
c0defreak/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_unaligned_structures.py | 282 | 1215 | import sys, unittest
from ctypes import *
# For each sample ctypes type, build one packed native-endian structure (X)
# and one packed byte-swapped structure (Y).  The single leading pad byte
# plus _pack_ = 1 forces the "value" field onto an odd (unaligned) offset.
structures = []
byteswapped_structures = []
if sys.byteorder == "little":
    SwappedStructure = BigEndianStructure
else:
    SwappedStructure = LittleEndianStructure
for typ in [c_short, c_int, c_long, c_longlong,
            c_float, c_double,
            c_ushort, c_uint, c_ulong, c_ulonglong]:
    class X(Structure):
        # _pack_ = 1 removes alignment padding between fields.
        _pack_ = 1
        _fields_ = [("pad", c_byte),
                    ("value", typ)]
    class Y(SwappedStructure):
        _pack_ = 1
        _fields_ = [("pad", c_byte),
                    ("value", typ)]
    structures.append(X)
    byteswapped_structures.append(Y)
class TestStructures(unittest.TestCase):
    """Unaligned (packed) fields must still report offset 1 and round-trip values."""

    def _check(self, structure_classes):
        # Shared body: "value" sits right after the 1-byte pad, and an
        # assigned value reads back unchanged.
        for structure in structure_classes:
            self.assertEqual(structure.value.offset, 1)
            instance = structure()
            instance.value = 4
            self.assertEqual(instance.value, 4)

    def test_native(self):
        self._check(structures)

    def test_swapped(self):
        self._check(byteswapped_structures)

if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
ccrook/Quantum-GIS | tests/src/python/test_qgsvectorfilewritertask.py | 8 | 4446 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorFileWriterTask.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '12/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import (
QgsApplication,
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsPointXY,
QgsVectorFileWriter,
QgsVectorFileWriterTask
)
from qgis.PyQt.QtCore import QCoreApplication, QDir
from qgis.testing import start_app, unittest
start_app()
def create_temp_filename(base_file):
    """Return *base_file* placed inside Qt's system temp directory."""
    temp_dir = str(QDir.tempPath())
    return os.path.join(temp_dir, base_file)
class TestQgsVectorFileWriterTask(unittest.TestCase):
    """Exercises QgsVectorFileWriterTask success/failure signalling.

    Each test wires the task's writeComplete/errorOccurred signals to flag
    setters, then spins the Qt event loop until one of them fires.
    """
    def setUp(self):
        # Flags flipped by the signal handlers below.
        self.success = False
        self.fail = False
    def onSuccess(self):
        self.success = True
    def onFail(self):
        self.fail = True
    def createLayer(self):
        # Build a small in-memory point layer holding a single feature.
        layer = QgsVectorLayer(
            ('Point?crs=epsg:4326&field=name:string(20)&'
             'field=age:integer&field=size:double&index=yes'),
            'test',
            'memory')
        self.assertIsNotNone(layer, 'Provider not initialized')
        provider = layer.dataProvider()
        self.assertIsNotNone(provider)
        ft = QgsFeature()
        ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
        ft.setAttributes(['Johny', 20, 0.3])
        provider.addFeatures([ft])
        return layer
    def testSuccess(self):
        """test successfully writing a layer"""
        self.layer = self.createLayer()
        options = QgsVectorFileWriter.SaveVectorOptions()
        tmp = create_temp_filename('successlayer.shp')
        task = QgsVectorFileWriterTask(self.layer, tmp, options)
        task.writeComplete.connect(self.onSuccess)
        task.errorOccurred.connect(self.onFail)
        QgsApplication.taskManager().addTask(task)
        # Spin the event loop until one of the signal handlers fires.
        while not self.success and not self.fail:
            QCoreApplication.processEvents()
        self.assertTrue(self.success)
        self.assertFalse(self.fail)
    def testLayerRemovalBeforeRun(self):
        """test behavior when layer is removed before task begins"""
        self.layer = self.createLayer()
        options = QgsVectorFileWriter.SaveVectorOptions()
        tmp = create_temp_filename('fail.shp')
        task = QgsVectorFileWriterTask(self.layer, tmp, options)
        task.writeComplete.connect(self.onSuccess)
        task.errorOccurred.connect(self.onFail)
        # remove layer: the task must fail cleanly, not crash.
        self.layer = None
        QgsApplication.taskManager().addTask(task)
        while not self.success and not self.fail:
            QCoreApplication.processEvents()
        self.assertFalse(self.success)
        self.assertTrue(self.fail)
    def testNoLayer(self):
        """test that failure (and not crash) occurs when no layer set"""
        options = QgsVectorFileWriter.SaveVectorOptions()
        tmp = create_temp_filename('fail.shp')
        task = QgsVectorFileWriterTask(None, tmp, options)
        task.writeComplete.connect(self.onSuccess)
        task.errorOccurred.connect(self.onFail)
        QgsApplication.taskManager().addTask(task)
        while not self.success and not self.fail:
            QCoreApplication.processEvents()
        self.assertFalse(self.success)
        self.assertTrue(self.fail)
    def testFieldValueConverter(self):
        """test no crash when fieldValueConverter is used"""
        self.layer = self.createLayer()
        options = QgsVectorFileWriter.SaveVectorOptions()
        converter = QgsVectorFileWriter.FieldValueConverter()
        options.fieldValueConverter = converter
        tmp = create_temp_filename('converter.shp')
        task = QgsVectorFileWriterTask(self.layer, tmp, options)
        task.writeComplete.connect(self.onSuccess)
        task.errorOccurred.connect(self.onFail)
        # Deleting the Python-side converter must not crash the task,
        # which is expected to hold its own reference/copy.
        del converter
        QgsApplication.taskManager().addTask(task)
        while not self.success and not self.fail:
            QCoreApplication.processEvents()
        self.assertTrue(self.success)
        self.assertFalse(self.fail)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
hudl/redash | redash/query_runner/google_analytics.py | 6 | 6240 | # -*- coding: utf-8 -*-
import json
import logging
from base64 import b64decode
from datetime import datetime
from urlparse import parse_qs, urlparse
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
enabled = True
except ImportError as e:
enabled = False
# Map the dataType strings returned by the GA API onto Redash column types;
# unknown types fall back to 'string' at the lookup site.
types_conv = dict(
    STRING=TYPE_STRING,
    INTEGER=TYPE_INTEGER,
    FLOAT=TYPE_FLOAT,
    DATE=TYPE_DATE,
    DATETIME=TYPE_DATETIME
)
def parse_ga_response(response):
    """Convert a Google Analytics API response (Core or MCF reporting) into
    Redash's result format: {'columns': [...], 'rows': [...]}.
    """
    columns = []
    for h in response['columnHeaders']:
        # GA reports dates as plain strings; retag the known date columns so
        # their cell values are parsed into real date/datetime objects below.
        if h['name'] in ('ga:date', 'mcf:conversionDate'):
            h['dataType'] = 'DATE'
        elif h['name'] == 'ga:dateHour':
            h['dataType'] = 'DATETIME'
        columns.append({
            'name': h['name'],
            'friendly_name': h['name'].split(':', 1)[1],
            'type': types_conv.get(h['dataType'], 'string')
        })
    rows = []
    for r in response['rows']:
        d = {}
        for c, value in enumerate(r):
            # Cell order matches header order, so the column metadata can be
            # looked up by index.  (Fix: this used to re-scan the columns list
            # with filter(...)[0] for every cell -- O(columns) per cell and
            # broken on Python 3, where filter() returns an iterator.)
            column_name = columns[c]['name']
            column_type = columns[c]['type']
            # mcf results come a bit different than ga results:
            if isinstance(value, dict):
                if 'primitiveValue' in value:
                    value = value['primitiveValue']
                elif 'conversionPathValue' in value:
                    steps = []
                    for step in value['conversionPathValue']:
                        steps.append('{}:{}'.format(step['interactionType'], step['nodeValue']))
                    value = ', '.join(steps)
                else:
                    raise Exception("Results format not supported")
            if column_type == TYPE_DATE:
                value = datetime.strptime(value, '%Y%m%d')
            elif column_type == TYPE_DATETIME:
                if len(value) == 10:
                    value = datetime.strptime(value, '%Y%m%d%H')
                elif len(value) == 12:
                    value = datetime.strptime(value, '%Y%m%d%H%M')
                else:
                    raise Exception("Unknown date/time format in results: '{}'".format(value))
            d[column_name] = value
        rows.append(d)
    return {'columns': columns, 'rows': rows}
class GoogleAnalytics(BaseSQLQueryRunner):
    """Redash query runner for the Google Analytics v3 reporting API."""
    @classmethod
    def annotate_query(cls):
        # Queries are JSON/query strings, not SQL -- no comment annotation.
        return False
    @classmethod
    def type(cls):
        return "google_analytics"
    @classmethod
    def name(cls):
        return "Google Analytics"
    @classmethod
    def enabled(cls):
        # False when the Google API client libraries failed to import above.
        return enabled
    @classmethod
    def configuration_schema(cls):
        # The only setting is the service-account key file (kept secret).
        return {
            'type': 'object',
            'properties': {
                'jsonKeyFile': {
                    "type": "string",
                    'title': 'JSON Key File'
                }
            },
            'required': ['jsonKeyFile'],
            'secret': ['jsonKeyFile']
        }
    def __init__(self, configuration):
        super(GoogleAnalytics, self).__init__(configuration)
        self.syntax = 'json'
    def _get_analytics_service(self):
        # The key file is stored base64-encoded in the data source config.
        scope = ['https://www.googleapis.com/auth/analytics.readonly']
        key = json.loads(b64decode(self.configuration['jsonKeyFile']))
        creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
        return build('analytics', 'v3', http=creds.authorize(httplib2.Http()))
    def _get_tables(self, schema):
        # Schema browser: one "table" per GA account, whose "columns" list the
        # web properties (with their default profile ids) under that account.
        accounts = self._get_analytics_service().management().accounts().list().execute().get('items')
        if accounts is None:
            raise Exception("Failed getting accounts.")
        else:
            for account in accounts:
                schema[account['name']] = {'name': account['name'], 'columns': []}
                properties = self._get_analytics_service().management().webproperties().list(
                    accountId=account['id']).execute().get('items', [])
                for property_ in properties:
                    schema[account['name']]['columns'].append(
                        u'{0} (ga:{1})'.format(property_['name'], property_['defaultProfileId'])
                    )
        return schema.values()
    def test_connection(self):
        try:
            service = self._get_analytics_service()
            service.management().accounts().list().execute()
        except HttpError as e:
            # Make sure we return a more readable error to the end user
            raise Exception(e._get_reason())
    def run_query(self, query, user):
        logger.debug("Analytics is about to execute query: %s", query)
        # A query is either a JSON object of API parameters, or a URL whose
        # query string is parsed into those parameters.
        try:
            params = json.loads(query)
        except:
            params = parse_qs(urlparse(query).query, keep_blank_values=True)
            for key in params.keys():
                params[key] = ','.join(params[key])
                if '-' in key:
                    # API parameter names use underscores (e.g. start_date).
                    params[key.replace('-', '_')] = params.pop(key)
        # Core (ga:) and Multi-Channel Funnels (mcf:) namespaces cannot be
        # mixed within one request.
        if 'mcf:' in params['metrics'] and 'ga:' in params['metrics']:
            raise Exception("Can't mix mcf: and ga: metrics.")
        if 'mcf:' in params.get('dimensions', '') and 'ga:' in params.get('dimensions', ''):
            raise Exception("Can't mix mcf: and ga: dimensions.")
        if 'mcf:' in params['metrics']:
            api = self._get_analytics_service().data().mcf()
        else:
            api = self._get_analytics_service().data().ga()
        if len(params) > 0:
            try:
                response = api.get(**params).execute()
                data = parse_ga_response(response)
                error = None
                json_data = json.dumps(data, cls=JSONEncoder)
            except HttpError as e:
                # Make sure we return a more readable error to the end user
                error = e._get_reason()
                json_data = None
        else:
            error = 'Wrong query format.'
            json_data = None
        return json_data, error
register(GoogleAnalytics)
| bsd-2-clause |
nrjcoin-project/nrjcoin | contrib/testgen/gen_base58_test_vectors.py | 1064 | 4344 | #!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
    '''Check vector v for validity.

    A vector is valid when its base58check decoding succeeds and the result
    matches one of the known key templates (prefix, payload size, suffix).
    '''
    result = b58decode_chk(v)
    if result is None:
        # Bad encoding or checksum mismatch.
        return False
    # Fix: removed the unused `valid = False` local that was never read.
    for template in templates:
        prefix = str(bytearray(template[0]))
        suffix = str(bytearray(template[2]))
        if result.startswith(prefix) and result.endswith(suffix):
            if (len(result) - len(prefix) - len(suffix)) == template[1]:
                return True
    return False
def gen_valid_vectors():
    '''Generate valid test vectors'''
    # Cycles through the templates forever; callers islice() what they need.
    while True:
        for template in templates:
            prefix = str(bytearray(template[0]))
            payload = os.urandom(template[1])
            suffix = str(bytearray(template[2]))
            rv = b58encode_chk(prefix + payload + suffix)
            assert is_valid(rv)
            # Keep only the metadata fields that apply to this key type.
            metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
            yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
    '''Generate possibly invalid vector'''
    # Each flag independently corrupts one structural part of the vector.
    if corrupt_prefix:
        prefix = os.urandom(1)
    else:
        prefix = str(bytearray(template[0]))
    if randomize_payload_size:
        # NOTE(review): max(..., 50) forces payloads of AT LEAST 50 bytes;
        # min() may have been intended to cap the exponential draw -- confirm.
        payload = os.urandom(max(int(random.expovariate(0.5)), 50))
    else:
        payload = os.urandom(template[1])
    if corrupt_suffix:
        suffix = os.urandom(len(template[2]))
    else:
        suffix = str(bytearray(template[2]))
    return b58encode_chk(prefix + payload + suffix)
def randbool(p=0.5):
    """Return True with probability *p* (False with probability 1 - p)."""
    roll = random.random()
    return roll < p
def gen_invalid_vectors():
    '''Generate invalid test vectors'''
    # start with some manual edge-cases
    yield "",
    yield "x",
    while True:
        # kinds of invalid vectors:
        # invalid prefix
        # invalid payload length
        # invalid (randomized) suffix (add random data)
        # corrupt checksum
        for template in templates:
            val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
            if random.randint(0,10)<1: # line corruption
                if randbool(): # add random character to end
                    val += random.choice(b58chars)
                else: # replace random character in the middle
                    n = random.randint(0, len(val))
                    val = val[0:n] + random.choice(b58chars) + val[n+1:]
            # Corruption may accidentally produce a valid vector; only yield
            # the ones that really fail validation.
            if not is_valid(val):
                yield val,
if __name__ == '__main__':
    import sys, json
    # argv[1]: 'valid' or 'invalid' (defaults to valid vectors);
    # argv[2]: number of vectors to emit (defaults to 0).
    # NOTE(review): an unrecognized mode raises KeyError -- only a missing
    # argument (IndexError) falls back to the default.
    iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
    try:
        uiter = iters[sys.argv[1]]
    except IndexError:
        uiter = gen_valid_vectors
    try:
        count = int(sys.argv[2])
    except IndexError:
        count = 0
    # Take `count` vectors from the (infinite) generator and dump as JSON.
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
| mit |
openjck/kuma | kuma/users/providers/github/views.py | 7 | 1719 | import requests
from allauth.account.utils import get_next_redirect_url
from allauth.socialaccount.providers.oauth2.views import (OAuth2LoginView,
OAuth2CallbackView)
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
from kuma.core.urlresolvers import reverse
class KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter):
    """
    A custom GitHub OAuth adapter used to also fetch the list of private
    email addresses stored for the user at GitHub; those addresses are kept
    in each account's extra data.
    """
    email_url = 'https://api.github.com/user/emails'

    def complete_login(self, request, app, token, **kwargs):
        # Both endpoints are queried with the same access token.
        params = {'access_token': token.token}
        extra_data = requests.get(self.profile_url, params=params).json()
        extra_data['email_addresses'] = requests.get(self.email_url,
                                                     params=params).json()
        return self.get_provider().sociallogin_from_response(request,
                                                             extra_data)
class KumaOAuth2LoginView(OAuth2LoginView):
    """Login view that records the post-login redirect target in the session."""

    def dispatch(self, request):
        next_url = get_next_redirect_url(request)
        if not next_url:
            # Fall back to the user's profile edit page in their locale.
            next_url = reverse('users.my_edit_page',
                               locale=request.LANGUAGE_CODE)
        request.session['sociallogin_next_url'] = next_url
        request.session.modified = True
        return super(KumaOAuth2LoginView, self).dispatch(request)
oauth2_login = KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter)
| mpl-2.0 |
nirmeshk/oh-mainline | mysite/missions/base/tests.py | 9 | 2714 | # This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2010, 2011 OpenHatch, Inc.
# Copyright (C) 2010 John Stumpo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from mysite.base.tests import TwillTests
from mysite.missions.base import views
import mysite.missions.base.view_helpers
from mysite.missions.models import StepCompletion, Step
from mysite.profile.models import Person
from mysite.base.view_helpers import subproc_check_output
from django.conf import settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.test import TestCase as DjangoTestCase
import mock
import os
import tarfile
from StringIO import StringIO
import tempfile
import subprocess
import difflib
import shutil
import random
def get_mission_test_data_path(mission_type):
    """Return the absolute path of <missions pkg>/<mission_type>/test_data."""
    here = os.path.abspath(os.path.dirname(__file__))
    candidate = os.path.join(here, '..', mission_type, 'test_data')
    return os.path.abspath(candidate)
def make_testdata_filename(mission_type, filename):
    """Return the (non-normalized) path of a file in a mission's testdata dir."""
    base = os.path.dirname(__file__)
    return os.path.join(base, '..', mission_type, 'testdata', filename)
def list_of_true_keys(d):
    """Return the keys of mapping *d* whose values are truthy,
    in the mapping's iteration order.

    Replaces a manual append loop with the equivalent comprehension.
    """
    return [key for key in d if d[key]]
class MainPageTests(TwillTests):
    """Missions main page: the completed-missions list should reflect
    the StepCompletion rows for the logged-in user."""
    fixtures = ['user-paulproteus', 'person-paulproteus']

    def setUp(self):
        TwillTests.setUp(self)
        self.client = self.login_with_client()

    def _completed(self, response):
        # Names of missions the rendered page marks as complete.
        return list_of_true_keys(response.context['completed_missions'])

    def test_mission_completion_list_display(self):
        main_page_url = reverse(views.main_page)

        # Initially nothing is complete
        self.assertFalse(self._completed(self.client.get(main_page_url)))

        # Complete the 'tar' step and reload
        person = Person.objects.get(user__username='paulproteus')
        StepCompletion(person=person,
                       step=Step.objects.get(name='tar')).save()
        self.assertEqual(['tar'],
                         self._completed(self.client.get(main_page_url)))
| agpl-3.0 |
vnsofthe/odoo | addons/l10n_bo/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Maaack/Silent-Night-API | silent_night/mixins/views.py | 1 | 2435 | from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from rest_framework.generics import RetrieveUpdateDestroyAPIView, ListCreateAPIView
from rest_framework.viewsets import ModelViewSet
class JSONResponse(HttpResponse):
    """
    An HttpResponse whose body is the JSON rendering of ``data``,
    served with an ``application/json`` content type.
    """
    def __init__(self, data, **kwargs):
        kwargs['content_type'] = 'application/json'
        rendered = JSONRenderer().render(data)
        super(JSONResponse, self).__init__(rendered, **kwargs)
def default_process_list_request(request, serializer_class, object_class):
    """Generic list-endpoint handler.

    GET  -> serialize and return all instances of *object_class*.
    POST -> validate *request.data* and create a new instance
            (201 on success, 400 with errors otherwise).
    Any other method falls through and returns None.
    """
    method = request.method
    if method == 'GET':
        serializer = serializer_class(object_class.objects.all(), many=True)
        return Response(serializer.data)
    if method == 'POST':
        serializer = serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
def default_process_detail_request(request, serializer_class, object_instance):
    """Generic detail-endpoint handler.

    GET    -> serialize and return *object_instance*.
    PUT    -> validate *request.data* and update it
              (200 on success, 400 with errors otherwise).
    DELETE -> delete it and return 204.
    Any other method falls through and returns None.
    """
    method = request.method
    if method == 'GET':
        return Response(serializer_class(object_instance).data)
    if method == 'PUT':
        serializer = serializer_class(object_instance, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    if method == 'DELETE':
        object_instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class BaseListView(ListCreateAPIView):
    """
    List all object instances, or create a new object instance.

    Abstract base: subclasses supply ``queryset``/``serializer_class``
    as usual for DRF generic views.
    """
    # Open access by default; tighten in concrete subclasses if needed.
    permission_classes = (AllowAny,)

    class Meta:
        abstract = True
class BaseDetailView(RetrieveUpdateDestroyAPIView):
    """
    Retrieve, update or delete an object instance.

    Abstract base: subclasses supply ``queryset``/``serializer_class``
    as usual for DRF generic views.
    """
    # Open access by default; tighten in concrete subclasses if needed.
    permission_classes = (AllowAny,)

    class Meta:
        abstract = True
class BaseViewSet(ModelViewSet):
    """
    Full CRUD viewset: list, create, retrieve, update or delete object
    instances.  (Docstring corrected: it was copy-pasted from the detail
    view, but ModelViewSet also provides list/create actions.)
    """
    # Open access by default; tighten in concrete subclasses if needed.
    permission_classes = (AllowAny,)

    class Meta:
        abstract = True
wuzhiwu/pb-socket-rpc-ex | python/src/protobuf/socketrpc/examples/time/run_client.py | 9 | 2309 | #!/usr/bin/python
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
# Copyright (c) 2010 Jan Dittberner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
run_client.py - An example client using the python socket
implementation of the Google Protocol Buffers.
This module is an executable script demonstrating the usage of the
python socket implementation of the Google Protocol Buffers. To work
correctly, the script requires a server to be running first
(i.e. run_server.py).
Authors: Martin Norbury (mnorbury@lcogt.net)
Eric Saunders (esaunders@lcogt.net)
Zach Walker (zwalker@lcogt.net)
Jan Dittberner (jan@dittberner.info)
May 2009, Nov 2010
'''
# Add main protobuf module to classpath
import sys
sys.path.append('../../main')
import time_pb2 as proto
from protobuf.socketrpc import RpcService
import logging
log = logging.getLogger(__name__)
# Server endpoint the example client connects to
hostname = 'localhost'
port = 8090

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    log.debug("test")

    # Create request message
    request = proto.TimeRequest()

    # RPC service stub bound to the time server
    service = RpcService(proto.TimeService_Stub, port, hostname)

    # Make a synchronous call; raises on timeout (milliseconds)
    try:
        response = service.getTime(request, timeout=1000)
        log.info(response)
    # Fix: "except Exception, ex" is Python-2-only syntax; the "as"
    # form is valid on Python 2.6+ and Python 3.
    except Exception as ex:
        log.exception(ex)
| mit |
simonwydooghe/ansible | test/units/modules/network/fortios/test_fortios_router_access_list6.py | 21 | 7717 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_access_list6
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-used fixture: patch the module's Connection class so no test
    ever opens a real connection to a FortiOS device."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_router_access_list6.Connection')
    return connection_class_mock
# Handler under test; the autouse fixture above keeps its Connection mocked.
fos_instance = FortiOSHandler(connection_mock)
def test_router_access_list6_creation(mocker):
    """state=present with a valid payload should POST and report a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    payload = {'comments': 'test_value_3',
               'name': 'default_name_4',
               }
    input_data = {'username': 'admin',
                  'state': 'present',
                  'router_access_list6': dict(payload),
                  'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    set_mock.assert_called_with('router', 'access-list6', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_access_list6_creation_fails(mocker):
    """A 500 from the device should surface as an error without a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    payload = {'comments': 'test_value_3',
               'name': 'default_name_4',
               }
    input_data = {'username': 'admin',
                  'state': 'present',
                  'router_access_list6': dict(payload),
                  'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    set_mock.assert_called_with('router', 'access-list6', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_access_list6_removal(mocker):
    """state=absent should DELETE the entry and report a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    input_data = {'username': 'admin',
                  'state': 'absent',
                  'router_access_list6': {'comments': 'test_value_3',
                                          'name': 'default_name_4',
                                          },
                  'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    delete_mock.assert_called_with('router', 'access-list6', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_access_list6_deletion_fails(mocker):
    """A 500 on deletion should surface as an error without a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    input_data = {'username': 'admin',
                  'state': 'absent',
                  'router_access_list6': {'comments': 'test_value_3',
                                          'name': 'default_name_4',
                                          },
                  'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    delete_mock.assert_called_with('router', 'access-list6', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_access_list6_idempotent(mocker):
    """A 404 on set means the object already matches: no error, no change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    payload = {'comments': 'test_value_3',
               'name': 'default_name_4',
               }
    input_data = {'username': 'admin',
                  'state': 'present',
                  'router_access_list6': dict(payload),
                  'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    set_mock.assert_called_with('router', 'access-list6', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_router_access_list6_filter_foreign_attributes(mocker):
    """Unknown attributes in the section must be filtered before the POST."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    payload = {'comments': 'test_value_3',
               'name': 'default_name_4',
               }
    section = dict(payload)
    section['random_attribute_not_valid'] = 'tag'
    input_data = {'username': 'admin',
                  'state': 'present',
                  'router_access_list6': section,
                  'vdom': 'root'}

    is_error, changed, response = fortios_router_access_list6.fortios_router(input_data, fos_instance)

    # The foreign attribute must not reach the device API
    set_mock.assert_called_with('router', 'access-list6', data=payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
raj454raj/eden | modules/s3db/doc.py | 4 | 32420 | # -*- coding: utf-8 -*-
""" Sahana Eden Document Library
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module
__all__ = ("S3DocumentLibrary",
           "S3DocSitRepModel",
           "doc_image_represent",
           "doc_document_list_layout",
           )
import os
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DocumentLibrary(S3Model):
    """
    Document Library: documents and images that can be attached to any
    record which is an instance of the "doc_entity" super-entity.
    """

    names = ("doc_entity",
             "doc_document",
             "doc_document_id",
             "doc_image",
             )

    def model(self):
        """Define doc_entity / doc_document / doc_image and return the
        reusable doc_document_id field."""

        T = current.T
        db = current.db
        s3 = current.response.s3

        person_comment = self.pr_person_comment
        person_id = self.pr_person_id
        location_id = self.gis_location_id
        organisation_id = self.org_organisation_id

        messages = current.messages
        NONE = messages["NONE"]
        UNKNOWN_OPT = messages.UNKNOWN_OPT

        # Shortcuts
        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table
        folder = current.request.folder
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Document-referencing entities
        # (tables that can own documents/images via the doc_id super-key)
        #
        entity_types = Storage(asset_asset=T("Asset"),
                               cms_post=T("Post"),
                               cr_shelter=T("Shelter"),
                               deploy_mission=T("Mission"),
                               doc_sitrep=T("Situation Report"),
                               event_incident=T("Incident"),
                               event_incident_report=T("Incident Report"),
                               hms_hospital=T("Hospital"),
                               hrm_human_resource=T("Human Resource"),
                               inv_adj=T("Stock Adjustment"),
                               inv_warehouse=T("Warehouse"),
                               # @ToDo: Deprecate
                               irs_ireport=T("Incident Report"),
                               pr_group=T("Team"),
                               project_project=T("Project"),
                               project_activity=T("Project Activity"),
                               project_framework=T("Project Framework"),
                               project_task=T("Task"),
                               org_office=T("Office"),
                               org_facility=T("Facility"),
                               org_group=T("Organization Group"),
                               req_req=T("Request"),
                               # @ToDo: Deprecate
                               stats_people=T("People"),
                               vulnerability_document=T("Vulnerability Document"),
                               vulnerability_risk=T("Risk"),
                               vulnerability_evac_route=T("Evacuation Route"),
                               )

        tablename = "doc_entity"
        self.super_entity(tablename, "doc_id", entity_types)

        # Components
        doc_id = "doc_id"
        self.add_components(tablename,
                            doc_document = doc_id,
                            doc_image = doc_id,
                            )

        # ---------------------------------------------------------------------
        # Documents
        #
        tablename = "doc_document"
        define_table(tablename,
                     # Instance
                     self.stats_source_superlink,
                     # Component not instance
                     super_link(doc_id, "doc_entity"),
                     # @ToDo: Remove since Site Instances are doc entities?
                     super_link("site_id", "org_site"),
                     Field("file", "upload",
                           autodelete = True,
                           length = current.MAX_FILENAME_LENGTH,
                           represent = self.doc_file_represent,
                           # upload folder needs to be visible to the download() function as well as the upload
                           uploadfolder = os.path.join(folder,
                                                       "uploads"),
                           ),
                     Field("mime_type",
                           readable = False,
                           writable = False,
                           ),
                     Field("name", length=128,
                           # Allow Name to be added onvalidation
                           requires = IS_EMPTY_OR(IS_LENGTH(128)),
                           label = T("Name")
                           ),
                     Field("url",
                           label = T("URL"),
                           represent = lambda url: \
                                       url and A(url, _href=url) or NONE,
                           requires = IS_EMPTY_OR(IS_URL()),
                           ),
                     Field("has_been_indexed", "boolean",
                           default = False,
                           readable = False,
                           writable = False,
                           ),
                     person_id(
                        # Enable when-required
                        label = T("Author"),
                        readable = False,
                        writable = False,
                        comment = person_comment(T("Author"),
                                                 T("The Author of this Document (optional)"))
                        ),
                     organisation_id(# Enable when-required
                                     readable = False,
                                     writable = False,
                                     ),
                     s3_date(label = T("Date Published"),
                             ),
                     # @ToDo: Move location to link table
                     location_id(# Enable when-required
                                 readable = False,
                                 writable = False,
                                 ),
                     s3_comments(),
                     Field("checksum",
                           readable = False,
                           writable = False,
                           ),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Add Reference Document"),
            title_display = T("Document Details"),
            title_list = T("Documents"),
            title_update = T("Edit Document"),
            label_list_button = T("List Documents"),
            label_delete_button = T("Delete Document"),
            msg_record_created = T("Document added"),
            msg_record_modified = T("Document updated"),
            msg_record_deleted = T("Document deleted"),
            msg_list_empty = T("No Documents found")
        )

        # Search Method

        # Resource Configuration
        # Only hook up the full-text indexing callbacks when Solr is configured
        if current.deployment_settings.get_base_solr_url():
            onaccept = self.document_onaccept
            ondelete = self.document_ondelete
        else:
            onaccept = None
            ondelete = None

        configure(tablename,
                  context = {"organisation": "organisation_id",
                             "person": "person_id",
                             "site": "site_id",
                             },
                  deduplicate = self.document_duplicate,
                  list_layout = doc_document_list_layout,
                  onaccept = onaccept,
                  ondelete = ondelete,
                  onvalidation = self.document_onvalidation,
                  super_entity = "stats_source",
                  )

        # Reusable field
        represent = doc_DocumentRepresent(lookup = tablename,
                                          fields = ("name", "file", "url"),
                                          labels = "%(name)s",
                                          show_link = True)

        document_id = S3ReusableField("document_id", "reference %s" % tablename,
                                      label = T("Document"),
                                      ondelete = "CASCADE",
                                      represent = represent,
                                      requires = IS_ONE_OF(db,
                                                           "doc_document.id",
                                                           represent),
                                      )

        # ---------------------------------------------------------------------
        # Images
        #
        # @ToDo: Field to determine which is the default image to use for
        #        e.g. a Map popup (like the profile picture)
        #        readable/writable=False except in the cases where-needed
        #
        doc_image_type_opts = {1: T("Photograph"),
                               2: T("Map"),
                               3: T("Document Scan"),
                               99: T("other")
                               }

        tablename = "doc_image"
        define_table(tablename,
                     # Component not instance
                     super_link(doc_id, "doc_entity"),
                     super_link("pe_id", "pr_pentity"), # @ToDo: Remove & make Persons doc entities instead?
                     super_link("site_id", "org_site"), # @ToDo: Remove since Site Instances are doc entities?
                     Field("file", "upload",
                           autodelete = True,
                           length = current.MAX_FILENAME_LENGTH,
                           represent = doc_image_represent,
                           requires = IS_EMPTY_OR(
                                        IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS)),
                                        # Distingish from prepop
                                        null = "",
                                        ),
                           # upload folder needs to be visible to the download() function as well as the upload
                           uploadfolder = os.path.join(folder,
                                                       "uploads",
                                                       "images"),
                           widget = S3ImageCropWidget((600, 600)),
                           ),
                     Field("mime_type",
                           readable = False,
                           writable = False,
                           ),
                     Field("name", length=128,
                           label = T("Name"),
                           # Allow Name to be added onvalidation
                           requires = IS_EMPTY_OR(IS_LENGTH(128)),
                           ),
                     Field("url",
                           label = T("URL"),
                           requires = IS_EMPTY_OR(IS_URL()),
                           ),
                     Field("type", "integer",
                           default = 1,
                           label = T("Image Type"),
                           represent = lambda opt: \
                                       doc_image_type_opts.get(opt, UNKNOWN_OPT),
                           requires = IS_IN_SET(doc_image_type_opts,
                                                zero=None),
                           ),
                     person_id(label = T("Author"),
                               ),
                     organisation_id(),
                     s3_date(label = T("Date Taken"),
                             ),
                     # @ToDo: Move location to link table
                     location_id(),
                     s3_comments(),
                     Field("checksum",
                           readable = False,
                           writable = False,
                           ),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Add Photo"),
            title_display = T("Photo Details"),
            title_list = T("Photos"),
            title_update = T("Edit Photo"),
            label_list_button = T("List Photos"),
            label_delete_button = T("Delete Photo"),
            msg_record_created = T("Photo added"),
            msg_record_modified = T("Photo updated"),
            msg_record_deleted = T("Photo deleted"),
            msg_list_empty = T("No Photos found"))

        # Resource Configuration
        configure(tablename,
                  deduplicate = self.document_duplicate,
                  onvalidation = lambda form: \
                                 self.document_onvalidation(form, document=False)
                  )

        # ---------------------------------------------------------------------
        # Pass model-global names to response.s3
        #
        return dict(doc_document_id = document_id,
                    )

    # -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults if the module is disabled """

        document_id = S3ReusableField("document_id", "integer",
                                      readable=False, writable=False)

        return dict(doc_document_id = document_id,
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def doc_file_represent(file):
        """ File representation: a download link labelled with the
            original filename, or NONE if no file is stored """

        if file:
            try:
                # Read the filename from the file
                filename = current.db.doc_document.file.retrieve(file)[0]
            except IOError:
                return current.T("File not found")
            else:
                return A(filename,
                         _href=URL(c="default", f="download", args=[file]))
        else:
            return current.messages["NONE"]

    # -------------------------------------------------------------------------
    @staticmethod
    def document_duplicate(item):
        """ Import item de-duplication: match on file, else on url """

        data = item.data
        query = None
        file = data.get("file")
        if file:
            table = item.table
            query = (table.file == file)
        else:
            url = data.get("url")
            if url:
                table = item.table
                query = (table.url == url)

        if query:
            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()

            if duplicate:
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
        return

    # -------------------------------------------------------------------------
    @staticmethod
    def document_onvalidation(form, document=True):
        """ Form validation for both, documents and images.

            Requires either a file upload or a URL; defaults the name
            from the filename when not given.
        """

        form_vars = form.vars
        doc = form_vars.file

        if doc is None:
            # If this is a prepop, then file not in form
            # Interactive forms with empty doc has this as "" not None
            return

        if not document:
            encoded_file = form_vars.get("imagecrop-data", None)
            if encoded_file:
                # S3ImageCropWidget: decode the cropped image from the
                # "<metadata>,<base64 data>" payload into an upload-like object
                import base64
                import uuid
                metadata, encoded_file = encoded_file.split(",")
                filename, datatype, enctype = metadata.split(";")
                f = Storage()
                f.filename = uuid.uuid4().hex + filename
                import cStringIO
                f.file = cStringIO.StringIO(base64.decodestring(encoded_file))
                doc = form_vars.file = f
                if not form_vars.name:
                    form_vars.name = filename

        if not hasattr(doc, "file") and not doc and not form_vars.url:
            if document:
                msg = current.T("Either file upload or document URL required.")
            else:
                msg = current.T("Either file upload or image URL required.")
            form.errors.file = msg
            form.errors.url = msg

        if hasattr(doc, "file"):
            name = form_vars.name
            if not name:
                # Use the filename
                form_vars.name = doc.filename
        else:
            id = current.request.post_vars.id
            if id:
                if document:
                    tablename = "doc_document"
                else:
                    tablename = "doc_image"
                db = current.db
                table = db[tablename]
                record = db(table.id == id).select(table.file,
                                                   limitby=(0, 1)).first()
                if record:
                    name = form_vars.name
                    if not name:
                        # Use the filename
                        form_vars.name = table.file.retrieve(record.file)[0]

        # Do a checksum on the file to see if it's a duplicate
        #import cgi
        #if isinstance(doc, cgi.FieldStorage) and doc.filename:
        #    f = doc.file
        #    form_vars.checksum = doc_checksum(f.read())
        #    f.seek(0)
        #    if not form_vars.name:
        #        form_vars.name = doc.filename

        #if form_vars.checksum is not None:
        #    # Duplicate allowed if original version is deleted
        #    query = ((table.checksum == form_vars.checksum) & \
        #             (table.deleted == False))
        #    result = db(query).select(table.name,
        #                              limitby=(0, 1)).first()
        #    if result:
        #        doc_name = result.name
        #        form.errors["file"] = "%s %s" % \
        #                              (T("This file already exists on the server as"), doc_name)

    # -------------------------------------------------------------------------
    @staticmethod
    def document_onaccept(form):
        """
            Build a full-text index
        """

        form_vars = form.vars
        doc = form_vars.file

        table = current.db.doc_document

        # NOTE(review): relies on a global `json` name that is not among
        # this module's visible imports — verify it is provided elsewhere.
        document = json.dumps(dict(filename=doc,
                                   name=table.file.retrieve(doc)[0],
                                   id=form_vars.id,
                                   ))

        # NOTE(review): `async` is a reserved word in Python 3.7+;
        # this code targets the web2py/Python-2 runtime.
        current.s3task.async("document_create_index",
                             args = [document])

    # -------------------------------------------------------------------------
    @staticmethod
    def document_ondelete(row):
        """
            Remove the full-text index
        """

        db = current.db
        table = db.doc_document
        record = db(table.id == row.id).select(table.file,
                                               limitby=(0, 1)).first()

        # NOTE(review): relies on a global `json` name that is not among
        # this module's visible imports — verify it is provided elsewhere.
        document = json.dumps(dict(filename=record.file,
                                   id=row.id,
                                   ))

        current.s3task.async("document_delete_index",
                             args = [document])
# =============================================================================
def doc_image_represent(filename):
    """
    Represent an image as a clickable thumbnail

    @param filename: name of the image file
    """

    if not filename:
        return current.messages["NONE"]

    # Same download URL serves both the thumbnail and the zoom target
    download_url = URL(c="default", f="download", args=filename)
    thumbnail = IMG(_src=download_url, _height=40)
    return DIV(A(thumbnail,
                 _class="zoom",
                 _href=download_url))

    # @todo: implement/activate JavaScript-driven zoom: render a hidden
    # full-size IMG with a unique "zoom-media-image-<uuid>" anchor and
    # point the A at "#<anchor>" instead of the download URL.
# =============================================================================
def doc_checksum(docstr):
    """ Calculate a checksum for a file: the SHA-1 hex digest
        of its contents """

    from hashlib import sha1
    return sha1(docstr).hexdigest()
# =============================================================================
def doc_document_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for Documents, e.g. on the HRM Profile

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["doc_document.id"]
    item_class = "thumbnail"

    raw = record._row
    title = record["doc_document.name"]
    file = raw["doc_document.file"] or ""
    url = raw["doc_document.url"] or ""
    # NOTE(review): `date` is extracted but never used below
    date = record["doc_document.date"]
    comments = raw["doc_document.comments"] or ""

    # Card body: download link if a file is stored, else the external URL
    if file:
        try:
            doc_name = current.s3db.doc_document.file.retrieve(file)[0]
        except (IOError, TypeError):
            doc_name = current.messages["NONE"]
        doc_url = URL(c="default", f="download",
                      args=[file])
        body = P(ICON("attachment"),
                 " ",
                 SPAN(A(doc_name,
                        _href=doc_url,
                        )
                      ),
                 " ",
                 _class="card_1_line",
                 )
    elif url:
        body = P(ICON("link"),
                 " ",
                 SPAN(A(url,
                        _href=url,
                        )),
                 " ",
                 _class="card_1_line",
                 )
    else:
        # Shouldn't happen!
        body = P(_class="card_1_line")

    # Edit Bar (buttons only for users permitted on this record)
    permit = current.auth.s3_has_permission
    table = current.s3db.doc_document
    if permit("update", table, record_id=record_id):
        edit_btn = A(ICON("edit"),
                     _href=URL(c="doc", f="document",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.T("Edit Document"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    # Render the item
    item = DIV(DIV(ICON("icon"),
                   SPAN(" %s" % title,
                        _class="card-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(DIV(DIV(body,
                           P(SPAN(comments),
                             " ",
                             _class="card_manylines",
                             ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )

    return item
# =============================================================================
class doc_DocumentRepresent(S3Represent):
    """ Representation of Documents """

    def link(self, k, v, row=None):
        """
        Represent a (key, value) as hypertext link: prefer a download
        link for a stored file, fall back to the external URL, else
        return the plain representation.

        @param k: the key (doc_document.id)
        @param v: the representation of the key
        @param row: the row with this key
        """

        if not row:
            return v
        try:
            filename = row["doc_document.file"]
            url = row["doc_document.url"]
        except AttributeError:
            return v
        if filename:
            return A(v, _href=URL(c="default", f="download", args=filename))
        if url:
            return A(v, _href=url)
        return v
# =============================================================================
class S3DocSitRepModel(S3Model):
    """
        Situation Reports
    """

    names = ("doc_sitrep",
             "doc_sitrep_id",
             )

    def model(self):
        """Define doc_sitrep and return the reusable doc_sitrep_id field."""

        T = current.T

        # ---------------------------------------------------------------------
        # Situation Reports
        # - can be aggregated by OU
        #
        tablename = "doc_sitrep"
        self.define_table(tablename,
                          self.super_link("doc_id", "doc_entity"),
                          Field("name", length=128,
                                label = T("Name"),
                                ),
                          Field("description", "text",
                                label = T("Description"),
                                represent = lambda body: XML(body),
                                widget = s3_richtext_widget,
                                ),
                          self.org_organisation_id(),
                          self.gis_location_id(
                            widget = S3LocationSelector(show_map = False),
                            ),
                          s3_date(default = "now",
                                  ),
                          s3_comments(),
                          *s3_meta_fields())

        # CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            label_create = T("Add Situation Report"),
            title_display = T("Situation Report Details"),
            title_list = T("Situation Reports"),
            title_update = T("Edit Situation Report"),
            title_upload = T("Import Situation Reports"),
            label_list_button = T("List Situation Reports"),
            label_delete_button = T("Delete Situation Report"),
            msg_record_created = T("Situation Report added"),
            msg_record_modified = T("Situation Report updated"),
            msg_record_deleted = T("Situation Report deleted"),
            msg_list_empty = T("No Situation Reports currently registered"))

        crud_form = S3SQLCustomForm("name",
                                    "description",
                                    "organisation_id",
                                    "location_id",
                                    "date",
                                    S3SQLInlineComponent(
                                        "document",
                                        name = "document",
                                        label = T("Attachments"),
                                        fields = [("", "file")],
                                        ),
                                    "comments",
                                    )

        if current.deployment_settings.get_org_branches():
            org_filter = S3HierarchyFilter("organisation_id",
                                           leafonly = False,
                                           )
        else:
            org_filter = S3OptionsFilter("organisation_id",
                                         #filter = True,
                                         #header = "",
                                         )

        filter_widgets = [org_filter,
                          S3LocationFilter(),
                          S3DateFilter("date"),
                          ]

        self.configure(tablename,
                       crud_form = crud_form,
                       filter_widgets = filter_widgets,
                       list_fields = ["date",
                                      "event_sitrep.incident_id",
                                      "location_id$L1",
                                      "location_id$L2",
                                      "location_id$L3",
                                      "organisation_id",
                                      "name",
                                      (T("Attachments"), "document.file"),
                                      "comments",
                                      ],
                       super_entity = "doc_entity",
                       )

        # Components
        self.add_components(tablename,
                            event_sitrep = {"name": "event_sitrep",
                                            "joinby": "sitrep_id",
                                            },
                            event_incident = {"link": "event_sitrep",
                                              "joinby": "sitrep_id",
                                              "key": "incident_id",
                                              "actuate": "hide",
                                              # NOTE(review): string "False" (truthy)
                                              # kept as-is — confirm intent
                                              "multiple": "False",
                                              #"autocomplete": "name",
                                              "autodelete": False,
                                              },
                            )

        # Reusable field
        represent = S3Represent(lookup=tablename)
        # Fix: the original referenced an unbound name `db` here (NameError
        # at model load) — use current.db like the rest of the module.
        sitrep_id = S3ReusableField("sitrep_id", "reference %s" % tablename,
                                    label = T("Situation Report"),
                                    ondelete = "RESTRICT",
                                    represent = represent,
                                    requires = IS_EMPTY_OR(
                                                IS_ONE_OF(current.db, "doc_sitrep.id",
                                                          represent,
                                                          orderby="doc_sitrep.name",
                                                          sort=True)),
                                    sortby = "name",
                                    )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(doc_sitrep_id = sitrep_id,
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """
            Return safe defaults in case the model has been deactivated.
        """

        dummy = S3ReusableField("dummy_id", "integer",
                                readable = False,
                                writable = False)

        return dict(doc_sitrep_id = lambda **attr: dummy("sitrep_id"),
                    )
# END =========================================================================
| mit |
druuu/django | tests/postgres_tests/test_hstore.py | 193 | 9011 | import json
from django.core import exceptions, serializers
from django.forms import Form
from . import PostgreSQLTestCase
from .models import HStoreModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
class SimpleTests(PostgreSQLTestCase):
    """Round-trip save/load behaviour of HStoreField on HStoreModel."""

    apps = ['django.contrib.postgres']

    def test_save_load_success(self):
        # A plain string->string mapping survives a save/reload cycle.
        value = {'a': 'b'}
        instance = HStoreModel(field=value)
        instance.save()
        reloaded = HStoreModel.objects.get()
        self.assertEqual(reloaded.field, value)

    def test_null(self):
        # A NULL column value reloads as None, not as an empty dict.
        instance = HStoreModel(field=None)
        instance.save()
        reloaded = HStoreModel.objects.get()
        self.assertEqual(reloaded.field, None)

    def test_value_null(self):
        # hstore distinguishes a NULL *value* under a key from a NULL field.
        value = {'a': None}
        instance = HStoreModel(field=value)
        instance.save()
        reloaded = HStoreModel.objects.get()
        self.assertEqual(reloaded.field, value)
class TestQuerying(PostgreSQLTestCase):
    """Lookup and key/value transform behaviour of HStoreField.

    The creation order in setUp matters: the tests assert against
    slices of self.objs, which rely on pk ordering.
    """

    def setUp(self):
        self.objs = [
            HStoreModel.objects.create(field={'a': 'b'}),
            HStoreModel.objects.create(field={'a': 'b', 'c': 'd'}),
            HStoreModel.objects.create(field={'c': 'd'}),
            HStoreModel.objects.create(field={}),
            HStoreModel.objects.create(field=None),
        ]

    def test_exact(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__exact={'a': 'b'}),
            self.objs[:1]
        )

    def test_contained_by(self):
        # The empty dict is contained by anything, so objs[3] matches too.
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
            self.objs[:4]
        )

    def test_contains(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__contains={'a': 'b'}),
            self.objs[:2]
        )

    def test_has_key(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_key='c'),
            self.objs[1:3]
        )

    def test_has_keys(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_keys=['a', 'c']),
            self.objs[1:2]
        )

    def test_has_any_keys(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__has_any_keys=['a', 'c']),
            self.objs[:3]
        )

    def test_key_transform(self):
        # field__a looks up the value stored under key 'a'.
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a='b'),
            self.objs[:2]
        )

    def test_keys(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__keys=['a']),
            self.objs[:1]
        )

    def test_values(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__values=['b']),
            self.objs[:1]
        )

    def test_field_chaining(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a__contains='b'),
            self.objs[:2]
        )

    def test_keys_contains(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__keys__contains=['a']),
            self.objs[:2]
        )

    def test_values_overlap(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
            self.objs[:3]
        )

    def test_key_isnull(self):
        # A missing key and an explicit NULL value both count as isnull=True.
        obj = HStoreModel.objects.create(field={'a': None})
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a__isnull=True),
            self.objs[2:5] + [obj]
        )
        self.assertSequenceEqual(
            HStoreModel.objects.filter(field__a__isnull=False),
            self.objs[:2]
        )

    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            HStoreModel.objects.filter(id__in=HStoreModel.objects.filter(field__a='b')),
            self.objs[:2]
        )
class TestSerialization(PostgreSQLTestCase):
    """HStore values round-trip through the JSON serializer.

    The hstore value is embedded as a JSON *string* inside the fixture,
    hence the escaped quotes in test_data.
    """

    test_data = '[{"fields": {"field": "{\\"a\\": \\"b\\"}"}, "model": "postgres_tests.hstoremodel", "pk": null}]'

    def test_dumping(self):
        instance = HStoreModel(field={'a': 'b'})
        data = serializers.serialize('json', [instance])
        # Compare parsed structures so key ordering does not matter.
        self.assertEqual(json.loads(data), json.loads(self.test_data))

    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgreSQLTestCase):
    """Model-field validation: hstore values must be strings."""

    def test_not_a_string(self):
        field = HStoreField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean({'a': 1}, None)
        self.assertEqual(cm.exception.code, 'not_a_string')
        self.assertEqual(cm.exception.message % cm.exception.params, 'The value of "a" is not a string.')
class TestFormField(PostgreSQLTestCase):
    """The forms.HStoreField parses JSON input and coerces values to strings."""

    def test_valid(self):
        field = forms.HStoreField()
        value = field.clean('{"a": "b"}')
        self.assertEqual(value, {'a': 'b'})

    def test_invalid_json(self):
        field = forms.HStoreField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('{"a": "b"')
        self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
        self.assertEqual(cm.exception.code, 'invalid_json')

    def test_not_string_values(self):
        # Unlike the model field, the form field silently stringifies values.
        field = forms.HStoreField()
        value = field.clean('{"a": 1}')
        self.assertEqual(value, {'a': '1'})

    def test_empty(self):
        field = forms.HStoreField(required=False)
        value = field.clean('')
        self.assertEqual(value, {})

    def test_model_field_formfield(self):
        model_field = HStoreField()
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, forms.HStoreField)

    def test_field_has_changed(self):
        # has_changed must compare parsed dicts, whether the initial value
        # is given as a JSON string or as a dict.
        class HStoreFormTest(Form):
            f1 = forms.HStoreField()
        form_w_hstore = HStoreFormTest()
        self.assertFalse(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'})
        self.assertTrue(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': '{"a": 1}'})
        self.assertFalse(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': '{"a": 1}'})
        self.assertTrue(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': {"a": 1}})
        self.assertFalse(form_w_hstore.has_changed())

        form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': {"a": 1}})
        self.assertTrue(form_w_hstore.has_changed())
class TestValidator(PostgreSQLTestCase):
    """Behaviour of KeysValidator: required keys, strict mode, messages."""

    def test_simple_valid(self):
        # Non-strict: extra keys are allowed.
        validator = KeysValidator(keys=['a', 'b'])
        validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})

    def test_missing_keys(self):
        validator = KeysValidator(keys=['a', 'b'])
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
        self.assertEqual(cm.exception.code, 'missing_keys')

    def test_strict_valid(self):
        validator = KeysValidator(keys=['a', 'b'], strict=True)
        validator({'a': 'foo', 'b': 'bar'})

    def test_extra_keys(self):
        # Strict mode additionally rejects unknown keys.
        validator = KeysValidator(keys=['a', 'b'], strict=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
        self.assertEqual(cm.exception.code, 'extra_keys')

    def test_custom_messages(self):
        # A custom message overrides only its own code; others keep defaults.
        messages = {
            'missing_keys': 'Foobar',
        }
        validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Foobar')
        self.assertEqual(cm.exception.code, 'missing_keys')
        with self.assertRaises(exceptions.ValidationError) as cm:
            validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
        self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
        self.assertEqual(cm.exception.code, 'extra_keys')

    def test_deconstruct(self):
        messages = {
            'missing_keys': 'Foobar',
        }
        validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
        path, args, kwargs = validator.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
| bsd-3-clause |
barrachri/epcon | conference/utils/voteengine-0.99/votelib.py | 8 | 5242 | #Votelib module
"""
Votelib module by Blake Cretney
This work is distributed AS IS. It is up to you to
determine if it is useful and safe. In particular,
NO WARRANTY is expressed or implied.
I permanently give everyone the rights to use, modify,
copy, distribute, re-distribute, and perform this work,
and all derived works, to the extent that I hold copyright
in them. My intent is to have this work treated as
public domain.
This module is for the procedures that don't do I/O or anything like that.
"""
from string import *
import re
import numpy
import sys
# 2 funcs for converting to and from special letter form
# i.e. A-Z AA-ZZ AAA-ZZZ etc.
def alpha_to_int(x):
    """Convert a letter label (A-Z, AA-ZZ, AAA-ZZZ, ...) to its 1-based
    integer index; the empty string maps to 0."""
    total = 0
    for ch in x:
        # Base-26 accumulate; 'A' (ord 65) contributes 1, 'Z' contributes 26.
        total = total * 26 + (ord(ch) - 64)
    return total
def int_to_alpha(x):
    """Inverse of alpha_to_int: convert a 1-based index to its letter
    label (1 -> A, 26 -> Z, 27 -> AA, ...).

    Fixes: uses floor division (``//``) instead of ``/`` so the digit
    extraction also works under Python 3 (identical for ints in Python 2),
    and renames the local ``len`` which shadowed the builtin.
    """
    alpha = ""
    sub = 1
    length = 0
    # Find the label length: subtract the size of each shorter label block
    # (26, 26^2, ...) until x falls inside the current block.
    while 1:
        sub = sub * 26
        length = length + 1
        temp = x - sub
        if temp <= 0:
            break
        x = temp
    x = x - 1
    # Emit `length` base-26 digits, least significant first.
    while length > 0:
        alpha = chr(x % 26 + 65) + alpha
        x = x // 26
        length = length - 1
    return alpha
# change the matrix to a winning-votes matrix
def zero_defeats(x):
    """Convert the pairwise tally matrix *x* in place to winning-votes form.

    For each pair (i, j): the loser's entry is zeroed, and a tie zeroes
    both entries, leaving only each winner's vote count.

    Fixes: ``xrange`` -> ``range`` (identical semantics for these sizes in
    Python 2, required for Python 3).
    """
    n = x.shape[0]
    for i in range(n):
        for j in range(i + 1, n):
            if x[i, j] == x[j, i]:
                x[i, j] = x[j, i] = 0
            elif x[i, j] > x[j, i]:
                x[j, i] = 0
            else:
                x[i, j] = 0
# change the matrix to a marginal matrix
def to_margins(x):
    """Convert the pairwise tally matrix *x* in place to a margins matrix:
    x[i, j] becomes (votes for i over j) - (votes for j over i), and
    x[j, i] its negation.

    Fixes: ``xrange`` -> ``range`` (identical here in Python 2, required
    for Python 3).
    """
    n = x.shape[0]
    for i in range(n):
        for j in range(i + 1, n):
            m = x[i, j] - x[j, i]
            x[i, j] = m
            x[j, i] = -m
# returns <0 if x is preffered to y, >0 if y is preferred to x, 0 if no preference
def cmpTie(x, y, tiebreaker):
    """Compare *x* and *y* by their position in the *tiebreaker* ranking.

    Returns <0 if x is preferred (appears earlier), >0 if y is preferred,
    and 0 when no tiebreaker is supplied.

    Fixes: ``tiebreaker == None`` replaced by the identity test
    ``is None`` (PEP 8).
    """
    if tiebreaker is None:
        return 0
    xi = tiebreaker.index(x)
    yi = tiebreaker.index(y)
    return xi - yi
def break_ties(winmat, tiebreaker):
    """Resolve remaining ties in *winmat* in place using *tiebreaker*.

    Repeatedly picks the best-ranked candidate (per tiebreaker order) that
    is not defeated by any still-unprocessed candidate, and awards it a
    win over every remaining candidate.  No-op when tiebreaker is None.

    Fixes: ``== None`` -> ``is None``; ``xrange`` -> ``range`` (identical
    here in Python 2, required for Python 3).
    """
    if tiebreaker is None:
        return
    n = winmat.shape[0]
    done = numpy.zeros((n), numpy.int_)  # record of which
    # candidates are already processed
    while 1:
        for i in tiebreaker:
            if done[i] > 0:
                continue
            for j in range(n):
                if i == j or done[j] > 0:
                    continue
                if winmat[j, i] > 0:
                    break
            else:
                break  # if no defeat, use this i
        else:
            break  # no i was undefeated. Must be done
        done[i] = 1
        # Candidate i beats every candidate not yet processed.
        for j in range(n):
            if done[j] == 0:
                winmat[i, j] = 1
#winmat - matrix of wins and ties, no contradictions allowed
#candlist - same old list of candidate names
def print_ranks(winmat, candlist):
    # Print candidates ranked by number of pairwise wins; when ties make a
    # strict ranking impossible, fall back to printing a 1/0/? win table.
    # NOTE(review): Python 2 print statements and the `string` module's
    # rjust/ljust functions - this file is Python 2 only.
    n = winmat.shape[0];
    wins = numpy.zeros((n), numpy.int32)
    for i in xrange(n):
        for j in xrange(n):
            if winmat[i, j] > winmat[j, i]: wins[i] = wins[i] + 1;
    order = []
    for i in xrange(n):
        order = order + [(wins[i], i)]
    order.sort()
    order.reverse()  # best candidate (most wins) first
    ties = 0
    for i in xrange(n):
        (c_wins, c) = order[i]
        if c_wins < n - 1 - i:
            # Fewer wins than a strict ranking requires at this position.
            ties = 1
            if i == 0:
                print "Tie for first place."
            else:
                print " ... ties prevent full ranking \n"
            break
        print candlist[c],
        if i < n - 1:
            print ">",
    if ties:
        print "Some ties exist. See table."
    # Win/loss table: 1 = row beats column, 0 = column beats row, ? = tie.
    print " ",
    for j in xrange(n):
        print rjust(candlist[j], 5),
    print
    for i in xrange(n):
        print ljust(candlist[i], 3),
        print " ",
        for j in xrange(n):
            if i == j: print " X",
            elif winmat[i][j] > winmat[j][i]:
                print " 1",
            elif winmat[j][i] > winmat[i][j]:
                print " 0",
            else:
                print " ?",
        print
    print
def print_some_scores(x, candlist, act):
    # Print the pairwise score table restricted to the candidates listed
    # in `act` (indices of the still-active candidates).
    # NOTE(review): Python 2 only (print statements, backtick repr).
    n = x.shape[0]
    print ' ',
    for j in act:
        print rjust(candlist[j], 5),
    print
    for i in act:
        print ljust(candlist[i], 3), ' ',
        for j in act:
            if i == j: print ' X',
            else: print rjust(`x[i, j]`, 5),
        print
    print
def print_scores(x, candlist, act=None):
    # Print the full pairwise score table, or delegate to
    # print_some_scores when a subset of active candidates is given.
    # NOTE(review): Python 2 only (print statements, backtick repr).
    if(act):
        print_some_scores(x, candlist, act)
        return
    n = x.shape[0]
    print ' ',
    for j in xrange(n):
        print rjust(candlist[j], 5),
    print
    for i in xrange(n):
        print ljust(candlist[i], 3), ' ',
        for j in xrange(n):
            if i == j: print ' X',
            else: print rjust(`x[i, j]`, 5),
        print
    print
def candRange(start, end):  # translates selected range of candidates into list
    # Expand a candidate range like "A".."C" or "A1".."B3" into the list of
    # all labels, iterating the numeric part fastest (odometer style).
    # NOTE(review): `failure` is defined elsewhere in this project; backtick
    # repr makes this Python 2 only.  The local name `list` shadows the
    # builtin.
    if start == "": failure("Missing Range")
    # Split a label into its alpha prefix and numeric suffix (both optional).
    pat = re.compile(r'(?P<alpha>[A-Z]*)(?P<num>\d*)')
    m = pat.match(start)
    if(m == None): failure("Improper range")
    start_alpha_raw = m.group('alpha')
    start_num_raw = m.group('num')
    m = pat.match(end)
    if(m == None): failure("Improper range")
    end_alpha_raw = m.group('alpha')
    end_num_raw = m.group('num')
    # Both endpoints must use the same label shape (alpha and/or numeric).
    if (start_alpha_raw == "") != (end_alpha_raw == ""):
        failure('alpha mismatch on range')
    if (start_num_raw == "") != (end_num_raw == ""):
        failure('Numeric mismatch on range')
    if start_alpha_raw:
        current_alpha = start_alpha = alpha_to_int(start_alpha_raw)
        end_alpha = alpha_to_int(end_alpha_raw)
        if start_alpha > end_alpha: failure('Alpha bound error on range')
    if start_num_raw:
        current_num = start_num = int(start_num_raw)
        end_num = int(end_num_raw)
        if start_num > end_num: failure('Numeric bound error on range')
    # carry: 0 = keep counting numbers, 1 = advance the alpha part,
    # 2 = whole range exhausted.
    carry = 0
    list = []
    while carry < 2:
        carry = 0
        c = ""
        if start_alpha_raw: c = c + int_to_alpha(current_alpha)
        if start_num_raw: c = c + `current_num`
        list = list + [c]
        if start_num_raw:
            if current_num == end_num:
                carry = 1
                current_num = start_num  # reset odometer for next alpha
            else: current_num = current_num + 1
        else: carry = 1
        if carry == 1:
            if start_alpha_raw:
                if current_alpha == end_alpha:
                    carry = 2
                else: current_alpha = current_alpha + 1
            else: carry = 2
    return(list)
def floyd(m):
    """In-place Floyd-Warshall style max-min closure of matrix *m*:
    m[i, j] becomes the width of the widest path from i to j (the
    strongest-path computation used by Schulze-type methods).

    Fixes: ``xrange`` -> ``range`` (identical here in Python 2, required
    for Python 3).
    """
    n = m.shape[0]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                m[i, j] = max(m[i, j], min(m[i, k], m[k, j]))
| bsd-2-clause |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/twisted/web/_auth/wrapper.py | 47 | 8223 | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A guard implementation which supports HTTP header-based authentication
schemes.
If no I{Authorization} header is supplied, an anonymous login will be
attempted by using a L{Anonymous} credentials object. If such a header is
supplied and does not contain allowed credentials, or if anonymous login is
denied, a 401 will be sent in the response along with I{WWW-Authenticate}
headers for each of the allowed authentication schemes.
"""
from zope.interface import implements
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.web.resource import IResource, ErrorPage
from twisted.web import util
from twisted.cred import error
from twisted.cred.credentials import Anonymous
class UnauthorizedResource(object):
    """
    Simple IResource to escape Resource dispatch
    """
    implements(IResource)  # NOTE(review): zope pre-class-decorator style (Python 2 era Twisted)
    isLeaf = True


    def __init__(self, factories):
        # Credential factories determine which WWW-Authenticate challenges
        # are advertised to the client.
        self._credentialFactories = factories


    def render(self, request):
        """
        Send www-authenticate headers to the client
        """
        def generateWWWAuthenticate(scheme, challenge):
            # Render one challenge as: scheme k1="v1", k2="v2", ...
            l = []
            for k,v in challenge.iteritems():  # NOTE(review): iteritems is Python 2 only
                l.append("%s=%s" % (k, quoteString(v)))
            return "%s %s" % (scheme, ", ".join(l))

        def quoteString(s):
            # Quote per RFC 2617: escape backslashes first, then quotes.
            return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)

        request.setResponseCode(401)
        for fact in self._credentialFactories:
            challenge = fact.getChallenge(request)
            request.responseHeaders.addRawHeader(
                'www-authenticate',
                generateWWWAuthenticate(fact.scheme, challenge))
        # HEAD responses must not carry a body.
        if request.method == 'HEAD':
            return ''
        return 'Unauthorized'


    def getChildWithDefault(self, path, request):
        """
        Disable resource dispatch
        """
        # Always return self so the 401 page is rendered for any child path.
        return self
class HTTPAuthSessionWrapper(object):
    """
    Wrap a portal, enforcing supported header-based authentication schemes.

    @ivar _portal: The L{Portal} which will be used to retrieve L{IResource}
        avatars.

    @ivar _credentialFactories: A list of L{ICredentialFactory} providers which
        will be used to decode I{Authorization} headers into L{ICredentials}
        providers.
    """
    implements(IResource)
    isLeaf = False

    def __init__(self, portal, credentialFactories):
        """
        Initialize a session wrapper

        @type portal: C{Portal}
        @param portal: The portal that will authenticate the remote client

        @type credentialFactories: C{Iterable}
        @param credentialFactories: The portal that will authenticate the
            remote client based on one submitted C{ICredentialFactory}
        """
        self._portal = portal
        self._credentialFactories = credentialFactories


    def _authorizedResource(self, request):
        """
        Get the L{IResource} which the given request is authorized to receive.
        If the proper authorization headers are present, the resource will be
        requested from the portal.  If not, an anonymous login attempt will be
        made.
        """
        authheader = request.getHeader('authorization')
        if not authheader:
            # No credentials supplied: try an anonymous login.
            return util.DeferredResource(self._login(Anonymous()))

        factory, respString = self._selectParseHeader(authheader)
        if factory is None:
            # Unsupported scheme: challenge the client.
            return UnauthorizedResource(self._credentialFactories)
        try:
            credentials = factory.decode(respString, request)
        except error.LoginFailed:
            return UnauthorizedResource(self._credentialFactories)
        except:
            # Malformed header crashed the factory: treat as a server error,
            # not as the client's fault.
            log.err(None, "Unexpected failure from credentials factory")
            return ErrorPage(500, None, None)
        else:
            return util.DeferredResource(self._login(credentials))


    def render(self, request):
        """
        Find the L{IResource} avatar suitable for the given request, if
        possible, and render it.  Otherwise, perhaps render an error page
        requiring authorization or describing an internal server failure.
        """
        return self._authorizedResource(request).render(request)


    def getChildWithDefault(self, path, request):
        """
        Inspect the Authorization HTTP header, and return a deferred which,
        when fired after successful authentication, will return an authorized
        C{Avatar}. On authentication failure, an C{UnauthorizedResource} will
        be returned, essentially halting further dispatch on the wrapped
        resource and all children
        """
        # Don't consume any segments of the request - this class should be
        # transparent!
        request.postpath.insert(0, request.prepath.pop())
        return self._authorizedResource(request)


    def _login(self, credentials):
        """
        Get the L{IResource} avatar for the given credentials.

        @return: A L{Deferred} which will be called back with an L{IResource}
            avatar or which will errback if authentication fails.
        """
        d = self._portal.login(credentials, None, IResource)
        d.addCallbacks(self._loginSucceeded, self._loginFailed)
        return d


    # NOTE(review): tuple-parameter unpacking in the signature below is
    # Python 2 only (removed by PEP 3113).
    def _loginSucceeded(self, (interface, avatar, logout)):
        """
        Handle login success by wrapping the resulting L{IResource} avatar
        so that the C{logout} callback will be invoked when rendering is
        complete.
        """
        class ResourceWrapper(proxyForInterface(IResource, 'resource')):
            """
            Wrap an L{IResource} so that whenever it or a child of it
            completes rendering, the cred logout hook will be invoked.

            An assumption is made here that exactly one L{IResource} from
            among C{avatar} and all of its children will be rendered.  If
            more than one is rendered, C{logout} will be invoked multiple
            times and probably earlier than desired.
            """
            def getChildWithDefault(self, name, request):
                """
                Pass through the lookup to the wrapped resource, wrapping
                the result in L{ResourceWrapper} to ensure C{logout} is
                called when rendering of the child is complete.
                """
                return ResourceWrapper(self.resource.getChildWithDefault(name, request))

            def render(self, request):
                """
                Hook into response generation so that when rendering has
                finished completely (with or without error), C{logout} is
                called.
                """
                request.notifyFinish().addBoth(lambda ign: logout())
                return super(ResourceWrapper, self).render(request)

        return ResourceWrapper(avatar)


    def _loginFailed(self, result):
        """
        Handle login failure by presenting either another challenge (for
        expected authentication/authorization-related failures) or a server
        error page (for anything else).
        """
        if result.check(error.Unauthorized, error.LoginFailed):
            return UnauthorizedResource(self._credentialFactories)
        else:
            log.err(
                result,
                "HTTPAuthSessionWrapper.getChildWithDefault encountered "
                "unexpected error")
            return ErrorPage(500, None, None)


    def _selectParseHeader(self, header):
        """
        Choose an C{ICredentialFactory} from C{_credentialFactories}
        suitable to use to decode the given I{Authenticate} header.

        @return: A two-tuple of a factory and the remaining portion of the
            header value to be decoded or a two-tuple of C{None} if no
            factory can decode the header value.
        """
        elements = header.split(' ')
        scheme = elements[0].lower()
        for fact in self._credentialFactories:
            if fact.scheme == scheme:
                return (fact, ' '.join(elements[1:]))
        return (None, None)
| gpl-2.0 |
b1-systems/kiwi | helper/schema_parser.py | 2 | 12547 | #!/usr/bin/env python
"""
Usage: schema_parser.py SCHEMA [--output=ARG]
Arguments:
SCHEMA Kiwi RelaxNG schema file to parse
Options:
--output This is output file (stdout used if not present)
"""
import docopt
from lxml import etree
from collections import namedtuple
import os.path
import logging
import sys
logging.basicConfig(level=logging.WARNING)
class SchemaNode(object):
    """Generic wrapper around one lxml node of a parsed RelaxNG schema.

    Fixes: @classmethod first parameter renamed from ``self`` to ``cls``
    (PEP 8 / N804); behaviour unchanged.
    """

    # (node, properties) pair returned by children_in_tree
    Child = namedtuple('Child', ['node', 'properties'])

    @classmethod
    def is_type(cls, node):
        """Type predicate; overridden by subclasses (base matches nothing)."""
        pass

    def __init__(self, node, namespaces):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.node = node
        self.namespaces = namespaces
        # A node nested inside <define name="..."> records that name so the
        # definition can be resolved from a <ref>.
        ref = node.xpath(
            '(./parent::rng:define[@name])[1]',
            namespaces=namespaces
        )
        if len(ref):
            self.reference = ref[0].attrib['name']
        else:
            self.reference = None
        self.documentation = None  # filled lazily by get_documentation()
        self.paths = []
        element_tree = node.getroottree()
        path = element_tree.getelementpath(node)
        # Store the node's XPath with the rng: prefix instead of the raw URI.
        self.x_path = path.replace('{%s}' % namespaces['rng'], 'rng:')
        # Quantifier wrappers recognised by _get_properties.
        self.properties = ['optional', 'oneOrMore', 'zeroOrMore']

    def get_documentation(self):
        """Return (and cache) the node's <documentation> text, '' if absent."""
        for candidate in self.node:
            if candidate.tag.endswith('documentation'):
                self.documentation = candidate.text.replace('\n', ' ')
        if self.documentation is None:
            self.documentation = ''
            self.logger.warning(
                'Node %s has no documentation in schema', self.name
            )
        return self.documentation

    def query(self, query, ret_type=None):
        """Run an XPath query from this node, wrapping each hit in
        *ret_type* (SchemaNode by default)."""
        nodes = []
        for node in self.node.xpath(query, namespaces=self.namespaces):
            if ret_type is None:
                nodes.append(SchemaNode(node, self.namespaces))
            else:
                nodes.append(ret_type(node, self.namespaces))
        return nodes

    def get_children_by_type(self, ret_type, stop_types=None):
        """Collect descendants of type *ret_type*, also resolving <ref>s,
        without descending into any of the *stop_types*."""
        if stop_types is None:
            stop_types = []
        # Gets direct children of this node in its tree
        children = self.children_in_tree(ret_type, stop_types)
        # Follow references that are defined somewhere else in tree
        s_types = [ret_type] + stop_types
        for reference in self.children_in_tree(Reference, s_types):
            if not reference.node.name.endswith('any'):
                children += reference.node.resolve_reference(
                    ret_type, stop_types, reference.properties
                )
        return children

    def children_in_tree(self, ret_type, stop_types, properties=None):
        """Recursive walk collecting Child tuples of *ret_type*, carrying
        down any quantifier properties picked up along the way."""
        if properties is None:
            properties = []
        children = []
        for child in self.node:
            if self._is_in_types(child, stop_types):
                continue
            if ret_type.is_type(child):
                children.append(SchemaNode.Child(
                    node=ret_type(child, self.namespaces),
                    properties=self._get_properties(child, properties)
                ))
            else:
                children += SchemaNode(child, self.namespaces).children_in_tree(
                    ret_type, stop_types, self._get_properties(child, properties)
                )
        return children

    def _get_properties(self, node, properties):
        # Prepend the quantifier this wrapper node represents (if any).
        props = []
        for prop in self.properties:
            if node.tag.endswith(prop):
                props = [prop]
                break
        return props + properties

    def _is_in_types(self, node, types):
        for t in types:
            if t.is_type(node):
                return True
        return False
class Reference(SchemaNode):
    """Schema node wrapping an RNG <ref>, able to resolve its <define>.

    Fixes: @classmethod first parameter renamed from ``self`` to ``cls``
    (PEP 8 / N804); behaviour unchanged.
    """

    @classmethod
    def is_type(cls, node):
        """Return True if *node* is a named <ref> element."""
        if 'name' in node.attrib and node.tag.endswith('ref'):
            return True
        return False

    def __init__(self, node, namespaces):
        super(Reference, self).__init__(node, namespaces)
        if not self.is_type(node):
            raise Exception('The given node is not a reference')
        self.name = self.node.attrib['name']

    def resolve_reference(self, ret_type, stop_types, properties):
        """Collect *ret_type* nodes from the referenced <define>, following
        nested references recursively."""
        define = self._find_define()
        if define is None:
            return []
        # Gets direct children of this node in its tree
        nodes = define.children_in_tree(ret_type, stop_types, properties)
        # Follow references that are defined somewhere else in tree
        s_types = stop_types + [ret_type]
        for reference in define.children_in_tree(
            Reference, s_types, properties
        ):
            nodes += reference.node.resolve_reference(
                ret_type, stop_types, reference.properties
            )
        return nodes

    def _find_define(self):
        # Returns None implicitly when the define is missing from the schema.
        node = self.query(
            '//rng:define[@name=\'%s\']' % self.name
        )
        if len(node):
            return node[0]
class Attribute(SchemaNode):
    """Schema node wrapping a named RNG <attribute> definition.

    Fixes: @classmethod first parameter renamed from ``self`` to ``cls``
    (PEP 8 / N804); behaviour unchanged.
    """

    @classmethod
    def is_type(cls, node):
        """Return True if *node* is a named <attribute> element."""
        if 'name' in node.attrib and node.tag.endswith('attribute'):
            return True
        return False

    def __init__(self, node, namespaces):
        super(Attribute, self).__init__(node, namespaces)
        if not self.is_type(node):
            raise Exception('The given node is not an attribute')
        self.documentation = None  # resolved lazily by get_documentation()
        self.name = self.node.attrib['name']
class Element(SchemaNode):
    """Schema node wrapping a named RNG <element> definition.

    Fixes: @classmethod first parameter renamed from ``self`` to ``cls``
    (PEP 8 / N804); behaviour unchanged.
    """

    # Children are recorded by x_path so they can be looked up in the
    # parser's global element table.
    Child = namedtuple('Child', ['x_path', 'properties'])

    @classmethod
    def is_type(cls, node):
        """Return True if *node* is a named <element> element."""
        if 'name' in node.attrib and node.tag.endswith('element'):
            return True
        return False

    def __init__(self, node, namespaces):
        super(Element, self).__init__(node, namespaces)
        if not self.is_type(node):
            raise Exception('The given node is not an element')
        self.children = None  # lazy: populated by find_children()
        self.name = self.node.attrib['name']
        self.attributes = self.get_children_by_type(Attribute, [Element])

    def get_parent_paths(self):
        # NOTE(review): looks dead/broken - self.get_name() is not defined
        # anywhere visible and self.paths holds lists (not strings), so
        # `'.' in x` and rsplit would misbehave.  Left untouched; confirm
        # before using this method.
        parents = []
        for path in filter(lambda x: '.' in x, self.paths):
            parent = path.rsplit('.' + self.get_name(), 1)[0]
            if len(parent):
                parents.append(parent)
        return parents

    def get_children(self):
        """Return the cached child list, computing it on first access."""
        if self.children is None:
            self.find_children()
        return self.children

    def find_children(self):
        """Populate self.children and return the child Element wrappers."""
        self.children = []
        elements = []
        for child in self.get_children_by_type(Element, [Attribute]):
            self.children.append(Element.Child(
                x_path=child.node.x_path,
                properties=child.properties
            ))
            elements.append(child.node)
        return elements
class RNGSchemaParser(object):
    """Parse a RelaxNG (RNG) schema and emit reStructuredText docs for it."""

    def __init__(self, schema):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.schema_tree = etree.parse(schema)
        # XML comments would otherwise show up as iterable children.
        etree.strip_tags(self.schema_tree, etree.Comment)
        self.nspaces = self.schema_tree.getroot().nsmap
        # Drop the default (unprefixed) namespace: lxml XPath rejects it.
        self.nspaces.pop(None)
        if 'rng' not in self.nspaces:
            raise Exception('\'rng:\' namespace must be defined in namespaces map')
        self.root_nodes = []
        for start in self.schema_tree.xpath('//rng:start', namespaces=self.nspaces):
            self.root_nodes += SchemaNode(start, self.nspaces).get_children_by_type(
                Element, [Attribute]
            )
        if not self.root_nodes:
            raise Exception('Not valid RelaxNG start tag')
        # Global table of every element keyed by its XPath.
        self.elements = dict()
        for root_node in self.root_nodes:
            self._find_elements(root_node.node, None)

    def x_path_query(self, query):
        """Run an XPath query against the whole schema tree."""
        return self.schema_tree.xpath(query, namespaces=self.nspaces)

    def write_element_doc(self, element, current_path, output):
        """Emit the reST section for *element* and recurse into its children.

        Writes to *output* (appending) when given, otherwise to stdout.
        """
        if current_path is None:
            current_path = []
        element_path = '.'.join(current_path + [element.name])
        level = element_path.count('.')
        # One underline character per nesting level for reST headings.
        titles = '-_.,:;#~<>^*'
        doc = '.. _k.%s:\n\n' % element_path
        doc += '%s\n' % element.name
        doc += titles[level] * len(element.name) + '\n\n'
        if len(element.get_documentation()):
            doc += '%s\n\n' % element.get_documentation()
        parents = []
        for path in [x for x in element.paths if len(x) > 1]:
            parents.append(':ref:`k.%s`' % '.'.join(path[:-1]))
        if len(parents):
            doc += 'Parents:\n'
            doc += ' These elements contain ``%s``: ' % element.name
            doc += ', '.join(parents)
            doc += '\n\n'
        children = []
        for child in element.get_children():
            children.append(':ref:`%s <k.%s>` %s' % (
                self.elements[child.x_path].name,
                '.'.join([element_path, self.elements[child.x_path].name]),
                self._properties_to_doc(child.properties)
            ))
        if len(children):
            doc += 'Children:\n'
            doc += ' The following elements occur in ``%s``: ' % element.name
            doc += ', '.join(children)
            doc += '\n\n'
        if len(element.attributes):
            doc += 'List of attributes for ``%s``:\n\n' % element.name
            for attribute in element.attributes:
                doc += '* ``%s`` %s: %s\n' % (
                    attribute.node.name,
                    self._properties_to_doc(attribute.properties),
                    attribute.node.get_documentation()
                )
            doc += '\n'
        if output:
            with open(output, 'a') as f:
                f.write(doc)
        else:
            sys.stdout.write(doc)
        # Depth-first: document each child beneath this element's path.
        for child in element.get_children():
            self.write_element_doc(
                self.elements[child.x_path],
                current_path + [element.name], output
            )

    def legend(self):
        """Return the reST legend explaining the [?]/[+]/[*] quantifiers."""
        doc = '.. hint:: **Element quantifiers**\n\n'\
            ' * **Optional** elements are qualified with _`[?]`\n'\
            ' * Elements that occur **one or more** times are qualified with _`[+]`\n'\
            ' * Elements that occur **zero or more** times are qualified with _`[*]`\n'\
            ' * Required elements are not qualified\n\n'
        return doc

    def _properties_to_doc(self, properties):
        # Map the first (innermost) quantifier to its reST marker.
        if not properties:
            return ''
        if properties[0] == 'optional':
            return '`[?]`_'
        elif properties[0] == 'oneOrMore':
            return '`[+]`_'
        elif properties[0] == 'zeroOrMore':
            return '`[*]`_'
        return ''

    def _find_elements(self, element, path):
        """Register *element* (and, on first sight, its subtree) in
        self.elements, recording every path it is reachable by."""
        if path is None:
            path = []
        if element.x_path not in self.elements:
            # Recurse only on first encounter to avoid infinite loops on
            # recursive schema definitions.
            self.elements[element.x_path] = element
            for child in element.find_children():
                self._find_elements(child, path + [element.name])
        self.elements[element.x_path].paths.append(path + [element.name])
if __name__ == "__main__":
    try:
        logger = logging.getLogger(__name__)
        arguments = docopt.docopt(__doc__)
        ofile = arguments['--output']
        # The parser needs the XML form of the schema, not the compact one.
        if os.path.splitext(arguments['SCHEMA'])[-1] == '.rnc':
            raise OSError("Need RNG instead of RNC")
        if ofile and os.path.isfile(ofile):
            logger.warning('Output file %s will be overwritten', ofile)
        if os.path.isfile(arguments['SCHEMA']):
            parser = RNGSchemaParser(arguments['SCHEMA'])
            # Pull the schema version out of the schemaversion attribute.
            value = parser.x_path_query(
                '(//rng:attribute[@name=\'schemaversion\']/rng:value)[1]'
            )
            if value:
                version = value[0].text
            else:
                raise Exception('Schema version not found!')
            title_label = '.. _schema-docs:\n\n'
            title = 'Schema Documentation %s\n' % version
            title += '=' * len(title) + '\n\n'
            title = title_label + title
            if ofile:
                with open(ofile, 'w') as f:
                    f.write(title)
                    f.write(parser.legend())
            else:
                # NOTE(review): the stdout path writes only the title, not
                # the legend - possibly an oversight; confirm before fixing.
                sys.stdout.write(title)
            for element in parser.root_nodes:
                parser.write_element_doc(
                    parser.elements[element.node.x_path], None, ofile
                )
        else:
            raise Exception('File not found: %s' % arguments['SCHEMA'])
    except Exception as e:
        logger.error('Schema parser failed', exc_info=True)
        exit(2)
| gpl-3.0 |
Jaiglissechef-i9100/f4ktion_kernel | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        # func: function name; time: timestamp of the call (None for
        # synthetic nodes); parent: caller node, defaulting to ROOT.
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        tree = self
        # Walk up the ancestor chain looking for the caller's frame.
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            # Caller unknown: attach it directly under the virtual root.
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # branch is the accumulated ASCII-art prefix for this depth.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            # Drop the trailing '|' of the prefix once the last sibling
            # has been emitted, so deeper lines do not continue the rail.
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """Signals a trace line that does not match the expected format,
    typically the final line truncated when the pipe was broken; the
    caller stops processing and ignores the line.
    """
    pass
class CommentLineException(Exception):
    """Signals a comment line (as found at the beginning of the trace
    file); the caller simply skips it.
    """
    pass
def parseLine(line):
    """Split one function-tracer line into (timestamp, callee, caller).

    Raises CommentLineException for '#' comment lines and
    BrokenLineException for lines that do not match the trace format.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.groups()
def main():
    """Read a raw function trace from stdin, build the call tree and
    print its textual rendering.

    Stops at the first broken (truncated) line; comment lines are
    skipped.
    """
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # The last captured line may be cut short by the broken pipe.
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # print() works on both Python 2 and Python 3; the original
    # "print CallTree.ROOT" statement was Python-2 only.
    print(CallTree.ROOT)
if __name__ == "__main__":
main()
| gpl-2.0 |
s-monisha/LibreHatti | src/librehatti/programmeletter/stafflookups.py | 4 | 1101 | from django.db.models import Q
from django.utils.html import escape
from librehatti.suspense.models import Staff
from ajax_select import LookupChannel
class StaffLookup(LookupChannel):
    # ajax_select lookup channel: matches Staff rows on name, code,
    # department title or position and renders a small HTML card per hit.
    model = Staff
    def get_query(self, q, request):
        """Return at most 15 Staff rows matching every whitespace-separated
        token of q (each token may match any of the four fields)."""
        staff = Staff.objects.all()
        for value in q.split():
            # Successive .filter() calls AND the per-token conditions.
            staff = staff.filter(Q(name__icontains=value)| \
            Q(code__icontains=value) \
            |Q(department__title__icontains=value)\
            |Q(position__position__icontains=value))
        return staff[0:15]
    def get_result(self, obj):
        # Plain-text value placed into the form input when selected.
        return unicode(obj.name)
    def format_match(self, obj):
        # Dropdown entry uses the same HTML as the selected display.
        return self.format_item_display(obj)
    def format_item_display(self, obj):
        # NOTE(review): re-queries the database for fields already present
        # on obj — presumably to pull the related department/position rows
        # in one values() call; confirm before simplifying.
        result = Staff.objects.values('name','code',
        'department__title','position__position').filter(id = obj.id)[0]
        return "<b>Name:</b> %s <br> <b>Position:</b> %s <br> <b>department:</b> %s \
<br> <b>Code:</b> %s <hr>" %((result['name'], result['position__position'],
        result['department__title'], result['code']))
| gpl-2.0 |
GetSomeBlocks/ServerStatus | resources/lib/unittest2/unittest2/runner.py | 164 | 6757 | """Running tests"""
import sys
import time
import unittest
from unittest2 import result
try:
from unittest2.signals import registerResult
except ImportError:
def registerResult(_):
pass
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.  verbosity > 1 prints one line per test
    ("showAll"); verbosity == 1 prints one character per test ("dots").
    """
    separator1 = '=' * 70
    separator2 = '-' * 70
    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__()
        self.stream = stream
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions
    def getDescription(self, test):
        """Return the test id, optionally joined with the first line of
        its docstring when descriptions are enabled."""
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        else:
            return str(test)
    def startTest(self, test):
        # In verbose mode print "<description> ... " and leave the line
        # open for the outcome word written by the add* hooks below.
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()
    def addSuccess(self, test):
        # Verbose: "ok"; dotted: ".".
        super(TextTestResult, self).addSuccess(test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()
    def addError(self, test, err):
        # Verbose: "ERROR"; dotted: "E".
        super(TextTestResult, self).addError(test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()
    def addFailure(self, test, err):
        # Verbose: "FAIL"; dotted: "F".
        super(TextTestResult, self).addFailure(test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()
    def addSkip(self, test, reason):
        # Verbose: "skipped <reason>"; dotted: "s".
        super(TextTestResult, self).addSkip(test, reason)
        if self.showAll:
            self.stream.writeln("skipped %r" % (reason,))
        elif self.dots:
            self.stream.write("s")
            self.stream.flush()
    def addExpectedFailure(self, test, err):
        # Verbose: "expected failure"; dotted: "x".
        super(TextTestResult, self).addExpectedFailure(test, err)
        if self.showAll:
            self.stream.writeln("expected failure")
        elif self.dots:
            self.stream.write("x")
            self.stream.flush()
    def addUnexpectedSuccess(self, test):
        # Verbose: "unexpected success"; dotted: "u".
        super(TextTestResult, self).addUnexpectedSuccess(test)
        if self.showAll:
            self.stream.writeln("unexpected success")
        elif self.dots:
            self.stream.write("u")
            self.stream.flush()
    def printErrors(self):
        """Print the detailed ERROR and FAIL tracebacks collected so far."""
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    def printErrorList(self, flavour, errors):
        # errors is a list of (test, formatted-traceback) pairs.
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
    def stopTestRun(self):
        super(TextTestResult, self).stopTestRun()
        self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult
    def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None):
        # Wrap the stream so the result object can call writeln().
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        if resultclass is not None:
            self.resultclass = resultclass
    def _makeResult(self):
        return self.resultclass(self.stream, self.descriptions, self.verbosity)
    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        result.failfast = self.failfast
        result.buffer = self.buffer
        # Lets the signal-handling machinery interrupt this result, when
        # unittest2.signals is importable (no-op fallback otherwise).
        registerResult(result)
        startTime = time.time()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
            else:
                # Older result objects without stopTestRun still need the
                # error details printed.
                result.printErrors()
        stopTime = time.time()
        timeTaken = stopTime - startTime
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        # "run != 1 and 's' or ''" is the old-style conditional: pluralise
        # "test" when more than one ran.
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            # These attributes only exist on sufficiently new result
            # classes; fall back to zeros otherwise.
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
            expectedFails, unexpectedSuccesses, skipped = results
        except AttributeError:
            pass
        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
| mit |
katekuehl/ArtCam | submit.py | 1 | 1554 | import urllib
from PIL import Image, ImageTk
import time
def make_request(filename, style_id):
    """Upload an image to the deepart.io API and return the URL of the
    styled output image.

    filename -- path of the local JPEG to submit
    style_id -- id of the deepart style to apply
    """
    import requests
    # 'with' closes the upload handle; the original open() leaked it.
    with open(filename, 'rb') as image_file:
        r = requests.post('http://turbo.deepart.io/api/post/',
                          data={'style': style_id, 'return_url': 'http://my.return/'},
                          files={'input_image': ('file.jpg', image_file, 'image/jpeg')})
    img = r.text
    link = ("http://turbo.deepart.io/media/output/%s.jpg" % img)
    # print() works on both Python 2 and 3; "print link" was Py2-only.
    print(link)
    time.sleep(1)
    return link
def vid_deepart(filename, style_id, save_to):
    """Style one frame via deepart and download the result to save_to."""
    link = make_request(filename, style_id)
    # urlretrieve returns (filename, headers); the result was never used,
    # so the old unused 'image_name' binding is dropped.
    urllib.urlretrieve(link, save_to)
def deepart(filename, style_id):
    """Style filename with deepart style_id and save the result to the
    next free images/outputN.jpg under the current directory.

    The download occasionally produces an unreadable image, so the
    request is retried (up to seven extra attempts, matching the
    original 1 + 1 + 6 schedule).  Returns the output path either way.
    """
    import os
    # os.path.join replaces the original hard-coded backslash paths,
    # which only worked on Windows.
    images_dir = os.path.join(os.getcwd(), "images")
    count = 1
    fname = os.path.join(images_dir, "output%d.jpg" % count)
    # Find the first unused output file name.
    while os.path.isfile(fname):
        count += 1
        fname = os.path.join(images_dir, "output%d.jpg" % count)
    print("Processing...")
    link = make_request(filename, style_id)
    urllib.urlretrieve(link, fname)
    attempts = 0
    while not check_image(fname) and attempts < 7:
        time.sleep(1)
        link = make_request(filename, style_id)
        urllib.urlretrieve(link, fname)
        attempts += 1
    # Unlike the original, only report an error when the file is still
    # unreadable after the final retry.
    if not check_image(fname):
        print("Error in saving or opening output file")
    return fname
def check_image(path):
    """Return True when PIL can open the file at path, False otherwise."""
    try:
        Image.open(path)
    except IOError:
        return False
    else:
        return True
| mit |
ibinti/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/es/forms.py | 309 | 7537 | # -*- coding: utf-8 -*-
"""
Spanish-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
import re
class ESPostalCodeField(RegexField):
    """
    Validate a Spanish postal code.

    A valid value is a five-digit string whose first two digits encode
    the province and therefore fall between 01 and 52.
    """
    default_error_messages = {
        'invalid': _('Enter a valid postal code in the range and format 01XXX - 52XXX.'),
    }
    def __init__(self, *args, **kwargs):
        pattern = r'^(0[1-9]|[1-4][0-9]|5[0-2])\d{3}$'
        super(ESPostalCodeField, self).__init__(
            pattern, max_length=None, min_length=None, *args, **kwargs)
class ESPhoneNumberField(RegexField):
    """
    Validate a Spanish phone number.

    Valid numbers are nine digits long and start with 6 (cell phones),
    8 (special phones) or 9 (landlines and special phones).
    Information numbers are omitted.

    TODO: accept and strip characters like dot, hyphen... in phone number
    """
    default_error_messages = {
        'invalid': _('Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'),
    }
    def __init__(self, *args, **kwargs):
        pattern = r'^(6|8|9)\d{8}$'
        super(ESPhoneNumberField, self).__init__(
            pattern, max_length=None, min_length=None, *args, **kwargs)
class ESIdentityCardNumberField(RegexField):
    """
    Spanish NIF/NIE/CIF (Fiscal Identification Number) code.
    Validates three diferent formats:
        NIF (individuals): 12345678A
        CIF (companies): A12345678
        NIE (foreigners): X12345678A
    according to a couple of simple checksum algorithms.
    Value can include a space or hyphen separator between number and letters.
    Number length is not checked for NIF (or NIE), old values start with a 1,
    and future values can contain digits greater than 8. The CIF control digit
    can be a number or a letter depending on company type. Algorithm is not
    public, and different authors have different opinions on which ones allows
    letters, so both validations are assumed true for all types.
    """
    default_error_messages = {
        'invalid': _('Please enter a valid NIF, NIE, or CIF.'),
        'invalid_only_nif': _('Please enter a valid NIF or NIE.'),
        'invalid_nif': _('Invalid checksum for NIF.'),
        'invalid_nie': _('Invalid checksum for NIE.'),
        'invalid_cif': _('Invalid checksum for CIF.'),
    }
    def __init__(self, only_nif=False, *args, **kwargs):
        # only_nif=True restricts validation to NIF/NIE (rejects CIF).
        self.only_nif = only_nif
        # Letter tables: NIF checksum letters, CIF control letters, the
        # company-type letters a CIF may start with, and NIE prefixes.
        self.nif_control = 'TRWAGMYFPDXBNJZSQVHLCKE'
        self.cif_control = 'JABCDEFGHI'
        self.cif_types = 'ABCDEFGHKLMNPQS'
        self.nie_types = 'XT'
        id_card_re = re.compile(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), re.IGNORECASE)
        super(ESIdentityCardNumberField, self).__init__(id_card_re, max_length=None, min_length=None,
                error_message=self.default_error_messages['invalid%s' % (self.only_nif and '_only_nif' or '')],
                *args, **kwargs)
    def clean(self, value):
        """Validate the checksum of a NIF, NIE or CIF and return the
        normalised (uppercased, separator-free) value."""
        super(ESIdentityCardNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # NIF checksum letter is the number modulo 23, indexed into the
        # control-letter table.
        nif_get_checksum = lambda d: self.nif_control[int(d)%23]
        value = value.upper().replace(' ', '').replace('-', '')
        m = re.match(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), value)
        letter1, number, letter2 = m.groups()
        if not letter1 and letter2:
            # NIF: digits followed by a checksum letter.
            if letter2 == nif_get_checksum(number):
                return value
            else:
                raise ValidationError(self.error_messages['invalid_nif'])
        elif letter1 in self.nie_types and letter2:
            # NIE: X/T prefix, then digits and a NIF-style checksum letter.
            if letter2 == nif_get_checksum(number):
                return value
            else:
                raise ValidationError(self.error_messages['invalid_nie'])
        elif not self.only_nif and letter1 in self.cif_types and len(number) in [7, 8]:
            # CIF: company-type letter, 7 digits and a control digit that
            # may be written as a number or as a letter.
            if not letter2:
                # 8-digit form: the last digit is the control digit.
                number, letter2 = number[:-1], int(number[-1])
            checksum = cif_get_checksum(number)
            if letter2 in (checksum, self.cif_control[checksum]):
                return value
            else:
                raise ValidationError(self.error_messages['invalid_cif'])
        else:
            raise ValidationError(self.error_messages['invalid'])
class ESCCCField(RegexField):
    """
    A form field that validates its input as a Spanish bank account or CCC
    (Codigo Cuenta Cliente).

        Spanish CCC is in format EEEE-OOOO-CC-AAAAAAAAAA where:

            E = entity
            O = office
            C = checksum
            A = account

        It's also valid to use a space as delimiter, or to use no delimiter.

        First checksum digit validates entity and office, and last one
        validates account. Validation is done multiplying every digit of 10
        digit value (with leading 0 if necessary) by number in its position in
        string 1, 2, 4, 8, 5, 10, 9, 7, 3, 6. Sum resulting numbers and extract
        it from 11. Result is checksum except when 10 then is 1, or when 11
        then is 0.

        TODO: allow IBAN validation too
    """
    default_error_messages = {
        'invalid': _('Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'),
        'checksum': _('Invalid checksum for bank account number.'),
    }
    def __init__(self, *args, **kwargs):
        super(ESCCCField, self).__init__(r'^\d{4}[ -]?\d{4}[ -]?\d{2}[ -]?\d{10}$',
            max_length=None, min_length=None, *args, **kwargs)
    def clean(self, value):
        """Validate both checksum digits of the CCC and return the value."""
        super(ESCCCField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Per-position weights for the checksum algorithm described above.
        control_str = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
        m = re.match(r'^(\d{4})[ -]?(\d{4})[ -]?(\d{2})[ -]?(\d{10})$', value)
        entity, office, checksum, account = m.groups()
        # The string replaces implement the special cases from the
        # docstring: a raw result of 10 becomes 1 and 11 becomes 0.
        get_checksum = lambda d: str(11 - sum([int(digit) * int(control) for digit, control in zip(d, control_str)]) % 11).replace('10', '1').replace('11', '0')
        # First digit checks '00' + entity + office, second checks account.
        if get_checksum('00' + entity + office) + get_checksum(account) == checksum:
            return value
        else:
            raise ValidationError(self.error_messages['checksum'])
class ESRegionSelect(Select):
    """Select widget offering the Spanish regions as its choices."""
    def __init__(self, attrs=None):
        # Local import, matching the module's convention for choice tables.
        from es_regions import REGION_CHOICES
        super(ESRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ESProvinceSelect(Select):
    """Select widget offering the Spanish provinces as its choices."""
    def __init__(self, attrs=None):
        # Local import, matching the module's convention for choice tables.
        from es_provinces import PROVINCE_CHOICES
        super(ESProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
def cif_get_checksum(number):
    """Return the CIF control digit (0-9) for a string of digits.

    Luhn-style scheme: digits at odd positions are summed as-is; digits
    at even positions are doubled and their decimal digits summed.
    """
    odd_sum = 0
    even_sum = 0
    for pos, digit in enumerate(number):
        d = int(digit)
        if pos % 2:
            odd_sum += d
        else:
            even_sum += sum(int(unit) for unit in str(d * 2))
    return (10 - (odd_sum + even_sum) % 10) % 10
| apache-2.0 |
defionscode/ansible | lib/ansible/module_utils/facts/hardware/aix.py | 53 | 9947 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
class AIXHardware(Hardware):
    """
    AIX-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    """
    platform = 'AIX'
    def populate(self, collected_facts=None):
        """Gather every AIX hardware fact group into one dict."""
        hardware_facts = {}
        cpu_facts = self.get_cpu_facts()
        memory_facts = self.get_memory_facts()
        dmi_facts = self.get_dmi_facts()
        vgs_facts = self.get_vgs_facts()
        mount_facts = self.get_mount_facts()
        devices_facts = self.get_device_facts()
        hardware_facts.update(cpu_facts)
        hardware_facts.update(memory_facts)
        hardware_facts.update(dmi_facts)
        hardware_facts.update(vgs_facts)
        hardware_facts.update(mount_facts)
        hardware_facts.update(devices_facts)
        return hardware_facts
    def get_cpu_facts(self):
        """Processor facts from lsdev (count) and lsattr (type, threads)."""
        cpu_facts = {}
        cpu_facts['processor'] = []
        rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
        if out:
            # Count 'Available' processor devices, remembering the first
            # device name so its attributes can be queried below.
            i = 0
            for line in out.splitlines():
                if 'Available' in line:
                    if i == 0:
                        data = line.split(' ')
                        cpudev = data[0]
                    i += 1
            cpu_facts['processor_count'] = int(i)
            rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
            data = out.split(' ')
            cpu_facts['processor'] = data[1]
            # processor_cores is taken from the smt_threads attribute of
            # the first processor device.
            rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
            if out:
                data = out.split(' ')
                cpu_facts['processor_cores'] = int(data[1])
        return cpu_facts
    def get_memory_facts(self):
        """Memory and swap sizes (in MB) from vmstat -v and lsps -s."""
        memory_facts = {}
        pagesize = 4096  # vmstat reports counters in 4K pages
        rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
        for line in out.splitlines():
            data = line.split()
            if 'memory pages' in line:
                pagecount = int(data[0])
            if 'free pages' in line:
                freecount = int(data[0])
        memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
        memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swapinfo.  swapinfo output looks like:
        # Device          1M-blocks     Used    Avail Capacity
        # /dev/ada0p3        314368        0   314368     0%
        #
        rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
        if out:
            lines = out.splitlines()
            data = lines[1].split()
            swaptotal_mb = int(data[0].rstrip('MB'))
            percused = int(data[1].rstrip('%'))
            memory_facts['swaptotal_mb'] = swaptotal_mb
            memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
        return memory_facts
    def get_dmi_facts(self):
        """Firmware version, serial, LPAR info and model from lsattr/lsconf."""
        dmi_facts = {}
        rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
        data = out.split()
        fw_version = data[1]
        # BUGFIX: the original used data[1].strip('IBM,'), but str.strip
        # treats its argument as a *set* of characters and would also eat
        # trailing 'I'/'B'/'M'/',' characters from the version string.
        # Remove the vendor prefix explicitly instead.
        if fw_version.startswith('IBM,'):
            fw_version = fw_version[len('IBM,'):]
        dmi_facts['firmware_version'] = fw_version
        lsconf_path = self.module.get_bin_path("lsconf")
        if lsconf_path:
            rc, out, err = self.module.run_command(lsconf_path)
            if rc == 0 and out:
                for line in out.splitlines():
                    data = line.split(':')
                    if 'Machine Serial Number' in line:
                        dmi_facts['product_serial'] = data[1].strip()
                    if 'LPAR Info' in line:
                        dmi_facts['lpar_info'] = data[1].strip()
                    if 'System Model' in line:
                        dmi_facts['product_name'] = data[1].strip()
        return dmi_facts
    def get_vgs_facts(self):
        """
        Get vg and pv Facts
        rootvg:
        PV_NAME           PV STATE          TOTAL PPs   FREE PPs    FREE DISTRIBUTION
        hdisk0            active            546         0           00..00..00..00..00
        hdisk1            active            546         113         00..00..00..21..92
        realsyncvg:
        PV_NAME           PV STATE          TOTAL PPs   FREE PPs    FREE DISTRIBUTION
        hdisk74           active            1999        6           00..00..00..00..06
        testvg:
        PV_NAME           PV STATE          TOTAL PPs   FREE PPs    FREE DISTRIBUTION
        hdisk105          active            999         838         200..39..199..200..200
        hdisk106          active            999         599         200..00..00..199..200
        """
        vgs_facts = {}
        lsvg_path = self.module.get_bin_path("lsvg")
        xargs_path = self.module.get_bin_path("xargs")
        # 'lsvg -o | xargs lsvg -p' lists the physical volumes of every
        # online volume group in one pass.
        cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
        if lsvg_path and xargs_path:
            rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
            if rc == 0 and out:
                vgs_facts['vgs'] = {}
                for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
                    vgs_facts['vgs'][m.group(1)] = []
                    pp_size = 0
                    cmd = "%s %s" % (lsvg_path, m.group(1))
                    rc, out, err = self.module.run_command(cmd)
                    if rc == 0 and out:
                        pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
                        for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
                            pv_info = {'pv_name': n.group(1),
                                       'pv_state': n.group(2),
                                       'total_pps': n.group(3),
                                       'free_pps': n.group(4),
                                       'pp_size': pp_size
                                       }
                            vgs_facts['vgs'][m.group(1)].append(pv_info)
        return vgs_facts
    def get_mount_facts(self):
        """Parse 'mount' output into a list of mount dicts."""
        mount_facts = {}
        mount_facts['mounts'] = []
        # AIX does not have mtab but mount command is only source of info (or to use
        # api calls to get same info)
        mount_path = self.module.get_bin_path('mount')
        rc, mount_out, err = self.module.run_command(mount_path)
        if mount_out:
            for line in mount_out.split('\n'):
                fields = line.split()
                # Skip the header rows and separator lines.
                if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
                    if re.match('^/', fields[0]):
                        # normal mount
                        mount_facts['mounts'].append({'mount': fields[1],
                                                      'device': fields[0],
                                                      'fstype': fields[2],
                                                      'options': fields[6],
                                                      'time': '%s %s %s' % (fields[3], fields[4], fields[5])})
                    else:
                        # nfs or cifs based mount
                        # in case of nfs if no mount options are provided on command line
                        # add into fields empty string...
                        if len(fields) < 8:
                            fields.append("")
                        mount_facts['mounts'].append({'mount': fields[2],
                                                      'device': '%s:%s' % (fields[0], fields[1]),
                                                      'fstype': fields[3],
                                                      'options': fields[7],
                                                      'time': '%s %s %s' % (fields[4], fields[5], fields[6])})
        return mount_facts
    def get_device_facts(self):
        """Collect per-device state and attributes via lsdev/lsattr."""
        device_facts = {}
        device_facts['devices'] = {}
        lsdev_cmd = self.module.get_bin_path('lsdev', True)
        lsattr_cmd = self.module.get_bin_path('lsattr', True)
        rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
        for line in out_lsdev.splitlines():
            field = line.split()
            device_attrs = {}
            device_name = field[0]
            device_state = field[1]
            device_type = field[2:]
            lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
            rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
            for attr in out_lsattr.splitlines():
                attr_fields = attr.split()
                attr_name = attr_fields[0]
                attr_parameter = attr_fields[1]
                device_attrs[attr_name] = attr_parameter
            device_facts['devices'][device_name] = {
                'state': device_state,
                'type': ' '.join(device_type),
                'attributes': device_attrs
            }
        return device_facts
class AIXHardwareCollector(HardwareCollector):
    # Registers AIXHardware as the fact class used when the target
    # platform is AIX.
    _platform = 'AIX'
    _fact_class = AIXHardware
| gpl-3.0 |
privateip/ansible | lib/ansible/modules/system/hostname.py | 12 | 23676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: hostname
author:
- "Adrian Likins (@alikins)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname.
- Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI, Alpine Linux.
- Any distribution that uses systemd as their init system.
- Note, this module does *NOT* modify /etc/hosts. You need to modify it yourself using other modules like template or replace.
options:
name:
required: true
description:
- Name of the host
'''
EXAMPLES = '''
- hostname:
name: web01
'''
import socket
from distutils.version import LooseVersion
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils._text import to_bytes, to_native
class UnimplementedStrategy(object):
    """Fallback strategy: every operation fails the module with a
    'platform not supported' message."""
    def __init__(self, module):
        self.module = module
    def update_current_and_permanent_hostname(self):
        self.unimplemented_error()
    def update_current_hostname(self):
        self.unimplemented_error()
    def update_permanent_hostname(self):
        self.unimplemented_error()
    def get_current_hostname(self):
        self.unimplemented_error()
    def set_current_hostname(self, name):
        self.unimplemented_error()
    def get_permanent_hostname(self):
        self.unimplemented_error()
    def set_permanent_hostname(self, name):
        self.unimplemented_error()
    def unimplemented_error(self):
        """Abort the module, naming the unsupported platform (and
        distribution, when one is reported)."""
        platform = get_platform()
        distribution = get_distribution()
        if distribution is None:
            msg_platform = platform
        else:
            msg_platform = '%s (%s)' % (platform, distribution)
        self.module.fail_json(
            msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
    """
    This is a generic Hostname manipulation class that is subclassed
    based on platform.

    A subclass may wish to set different strategy instance to self.strategy.

    All subclasses MUST define platform and distribution (which may be None).
    """
    platform = 'Generic'
    distribution = None
    strategy_class = UnimplementedStrategy
    def __new__(cls, *args, **kwargs):
        # Instantiates the subclass whose platform/distribution class
        # attributes match the running system.
        return load_platform_subclass(Hostname, args, kwargs)
    def __init__(self, module):
        self.module = module
        self.name = module.params['name']
        # systemd-managed Linux always goes through hostnamectl, overriding
        # the distribution-specific strategy_class.
        if self.platform == 'Linux' and Facts(module).is_systemd_managed():
            self.strategy = SystemdStrategy(module)
        else:
            self.strategy = self.strategy_class(module)
    def update_current_and_permanent_hostname(self):
        # Delegation: all real work happens in the selected strategy.
        return self.strategy.update_current_and_permanent_hostname()
    def get_current_hostname(self):
        return self.strategy.get_current_hostname()
    def set_current_hostname(self, name):
        self.strategy.set_current_hostname(name)
    def get_permanent_hostname(self):
        return self.strategy.get_permanent_hostname()
    def set_permanent_hostname(self, name):
        self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
    """
    This is a generic Hostname manipulation strategy class.

    A subclass may wish to override some or all of these methods.
      - get_current_hostname()
      - get_permanent_hostname()
      - set_current_hostname(name)
      - set_permanent_hostname(name)
    """
    def __init__(self, module):
        self.module = module
        self.hostname_cmd = self.module.get_bin_path('hostname', True)
        self.changed = False
    def update_current_and_permanent_hostname(self):
        """Apply the requested name to both the live and the persisted
        hostname; return whether anything changed."""
        self.update_current_hostname()
        self.update_permanent_hostname()
        return self.changed
    def update_current_hostname(self):
        # Only touch the system when the live hostname differs.
        name = self.module.params['name']
        current_name = self.get_current_hostname()
        if current_name != name:
            self.set_current_hostname(name)
            self.changed = True
    def update_permanent_hostname(self):
        # Only rewrite the persisted value when it differs.
        name = self.module.params['name']
        permanent_name = self.get_permanent_hostname()
        if permanent_name != name:
            self.set_permanent_hostname(name)
            self.changed = True
    def get_current_hostname(self):
        """Return the live hostname as reported by the hostname command."""
        cmd = [self.hostname_cmd]
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                      (rc, out, err))
        return to_native(out).strip()
    def set_current_hostname(self, name):
        """Set the live hostname via the hostname command."""
        cmd = [self.hostname_cmd, name]
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                      (rc, out, err))
    def get_permanent_hostname(self):
        # Persistence is platform-specific; subclasses override.
        return None
    def set_permanent_hostname(self, name):
        # Persistence is platform-specific; subclasses override.
        pass
# ===========================================
class DebianStrategy(GenericStrategy):
    """
    This is a Debian family Hostname manipulation strategy class - it edits
    the /etc/hostname file.
    """
    HOSTNAME_FILE = '/etc/hostname'
    def get_permanent_hostname(self):
        """Return the hostname stored in /etc/hostname, creating the file
        empty if it does not exist yet."""
        if not os.path.isfile(self.HOSTNAME_FILE):
            try:
                # 'with' closes the handle even on error; the original
                # open(...).write("") leaked it.
                with open(self.HOSTNAME_FILE, "a") as f:
                    f.write("")
            except IOError:
                err = get_exception()
                self.module.fail_json(msg="failed to write file: %s" %
                                      str(err))
        try:
            with open(self.HOSTNAME_FILE) as f:
                return f.read().strip()
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" %
                                  str(err))
    def set_permanent_hostname(self, name):
        """Write name (plus trailing newline) to /etc/hostname."""
        try:
            with open(self.HOSTNAME_FILE, 'w+') as f:
                f.write("%s\n" % name)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" %
                                  str(err))
# ===========================================
class SLESStrategy(GenericStrategy):
    """
    This is a SLES Hostname strategy class - it edits the
    /etc/HOSTNAME file.
    """
    HOSTNAME_FILE = '/etc/HOSTNAME'
    def get_permanent_hostname(self):
        """Return the hostname stored in /etc/HOSTNAME, creating the file
        empty if it does not exist yet."""
        if not os.path.isfile(self.HOSTNAME_FILE):
            try:
                # 'with' closes the handle even on error; the original
                # open(...).write("") leaked it.
                with open(self.HOSTNAME_FILE, "a") as f:
                    f.write("")
            except IOError:
                err = get_exception()
                self.module.fail_json(msg="failed to write file: %s" %
                                      str(err))
        try:
            with open(self.HOSTNAME_FILE) as f:
                return f.read().strip()
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" %
                                  str(err))
    def set_permanent_hostname(self, name):
        """Write name (plus trailing newline) to /etc/HOSTNAME."""
        try:
            with open(self.HOSTNAME_FILE, 'w+') as f:
                f.write("%s\n" % name)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" %
                                  str(err))
# ===========================================
class RedHatStrategy(GenericStrategy):
    """
    This is a Redhat Hostname strategy class - it edits the
    /etc/sysconfig/network file.
    """
    NETWORK_FILE = '/etc/sysconfig/network'
    def get_permanent_hostname(self):
        """Return the HOSTNAME= value from /etc/sysconfig/network, or None
        when no such line exists."""
        try:
            # Opened in text mode: the original 'rb' mode yields bytes on
            # Python 3, breaking the startswith/split str operations below.
            with open(self.NETWORK_FILE) as f:
                for line in f:
                    if line.startswith('HOSTNAME'):
                        k, v = line.split('=')
                        return v.strip()
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" %
                                  str(err))
    def set_permanent_hostname(self, name):
        """Rewrite /etc/sysconfig/network with HOSTNAME=name, replacing an
        existing entry or appending one at the end."""
        try:
            lines = []
            found = False
            with open(self.NETWORK_FILE) as f:
                for line in f:
                    if line.startswith('HOSTNAME'):
                        lines.append("HOSTNAME=%s\n" % name)
                        found = True
                    else:
                        lines.append(line)
            if not found:
                lines.append("HOSTNAME=%s\n" % name)
            with open(self.NETWORK_FILE, 'w+') as f:
                f.writelines(lines)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" %
                                  str(err))
# ===========================================
class AlpineStrategy(GenericStrategy):
    """
    This is a Alpine Linux Hostname manipulation strategy class - it edits
    the /etc/hostname file then run hostname -F /etc/hostname.
    """
    HOSTNAME_FILE = '/etc/hostname'
    def update_current_and_permanent_hostname(self):
        # Alpine applies the live hostname *from* the file, so the
        # permanent value must be written first.
        self.update_permanent_hostname()
        self.update_current_hostname()
        return self.changed
    def get_permanent_hostname(self):
        """Return the hostname stored in /etc/hostname, creating the file
        empty if it does not exist yet."""
        if not os.path.isfile(self.HOSTNAME_FILE):
            try:
                # 'with' closes the handle even on error; the original
                # open(...).write("") leaked it.
                with open(self.HOSTNAME_FILE, "a") as f:
                    f.write("")
            except IOError:
                err = get_exception()
                self.module.fail_json(msg="failed to write file: %s" %
                                      str(err))
        try:
            with open(self.HOSTNAME_FILE) as f:
                return f.read().strip()
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" %
                                  str(err))
    def set_permanent_hostname(self, name):
        """Write name (plus trailing newline) to /etc/hostname."""
        try:
            with open(self.HOSTNAME_FILE, 'w+') as f:
                f.write("%s\n" % name)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" %
                                  str(err))
    def set_current_hostname(self, name):
        """Apply the hostname from the file via 'hostname -F'."""
        cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                  (rc, out, err))
# ===========================================
class SystemdStrategy(GenericStrategy):
    """
    This is a Systemd hostname manipulation strategy class - it uses
    the hostnamectl command.
    """

    def _run(self, cmd):
        # Run a command, failing the module (which exits) on non-zero rc;
        # returns the command's stdout on success.
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                  (rc, out, err))
        return out

    def get_current_hostname(self):
        return to_native(self._run(['hostname'])).strip()

    def set_current_hostname(self, name):
        if len(name) > 64:
            self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
        self._run(['hostnamectl', '--transient', 'set-hostname', name])

    def get_permanent_hostname(self):
        return to_native(self._run(['hostnamectl', '--static', 'status'])).strip()

    def set_permanent_hostname(self, name):
        if len(name) > 64:
            self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
        # Keep the pretty and static names in sync.
        self._run(['hostnamectl', '--pretty', 'set-hostname', name])
        self._run(['hostnamectl', '--static', 'set-hostname', name])
# ===========================================
class OpenRCStrategy(GenericStrategy):
    """
    This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
    the /etc/conf.d/hostname file.
    """
    HOSTNAME_FILE = '/etc/conf.d/hostname'

    def get_permanent_hostname(self):
        """Return the hostname value from /etc/conf.d/hostname, or None."""
        try:
            # 'with' guarantees the file is closed and avoids the NameError
            # the old try/finally hit when open() itself raised (f was
            # referenced in the finally before it was ever assigned).
            with open(self.HOSTNAME_FILE, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line.startswith('hostname='):
                        # len('hostname=') == 9: keep every character of the
                        # value (the old [10:] slice silently dropped the
                        # first character of unquoted values), then strip
                        # the optional surrounding quotes.
                        return line[9:].strip('"')
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" % str(err))
        return None

    def set_permanent_hostname(self, name):
        """Replace the hostname= line in /etc/conf.d/hostname with *name*."""
        try:
            with open(self.HOSTNAME_FILE, 'r') as f:
                lines = [x.strip() for x in f]
            for i, line in enumerate(lines):
                if line.startswith('hostname='):
                    lines[i] = 'hostname="%s"' % name
                    break
            with open(self.HOSTNAME_FILE, 'w') as f:
                f.write('\n'.join(lines) + '\n')
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" % str(err))
# ===========================================
class OpenBSDStrategy(GenericStrategy):
    """
    This is a OpenBSD family Hostname manipulation strategy class - it edits
    the /etc/myname file.
    """

    HOSTNAME_FILE = '/etc/myname'

    def get_permanent_hostname(self):
        # Make sure the file exists so the read below cannot fail on a
        # fresh system.
        if not os.path.isfile(self.HOSTNAME_FILE):
            try:
                open(self.HOSTNAME_FILE, "a").write("")
            except IOError:
                err = get_exception()
                self.module.fail_json(msg="failed to write file: %s" %
                                      str(err))
        try:
            with open(self.HOSTNAME_FILE) as hostname_file:
                return hostname_file.read().strip()
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" %
                                  str(err))

    def set_permanent_hostname(self, name):
        try:
            with open(self.HOSTNAME_FILE, 'w+') as hostname_file:
                hostname_file.write("%s\n" % name)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" %
                                  str(err))
# ===========================================
class SolarisStrategy(GenericStrategy):
    """
    This is a Solaris11 or later Hostname manipulation strategy class - it
    execute hostname command.
    """

    def set_current_hostname(self, name):
        # -t changes only the transient (running-system) hostname.
        rc, out, err = self.module.run_command([self.hostname_cmd, '-t', name])
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                  (rc, out, err))

    def get_permanent_hostname(self):
        # The permanent name is stored as an SMF identity service property.
        fmri = 'svc:/system/identity:node'
        pattern = 'config/nodename'
        cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
        rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                  (rc, out, err))
        return to_native(out).strip()

    def set_permanent_hostname(self, name):
        rc, out, err = self.module.run_command([self.hostname_cmd, name])
        if rc != 0:
            self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                  (rc, out, err))
# ===========================================
class FreeBSDStrategy(GenericStrategy):
    """
    This is a FreeBSD hostname manipulation strategy class - it edits
    the /etc/rc.conf.d/hostname file.
    """

    HOSTNAME_FILE = '/etc/rc.conf.d/hostname'

    def get_permanent_hostname(self):
        """Return the hostname value from /etc/rc.conf.d/hostname, or None."""
        # Seed a stub entry so a missing file still yields a parseable line.
        if not os.path.isfile(self.HOSTNAME_FILE):
            try:
                open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
            except IOError:
                err = get_exception()
                self.module.fail_json(msg="failed to write file: %s" %
                                      str(err))
        try:
            # 'with' guarantees the file is closed and avoids the NameError
            # the old try/finally hit when open() itself raised (f was
            # referenced in the finally before it was ever assigned).
            with open(self.HOSTNAME_FILE, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line.startswith('hostname='):
                        # len('hostname=') == 9: keep every character of the
                        # value (the old [10:] slice silently dropped the
                        # first character of unquoted values), then strip
                        # the optional surrounding quotes.
                        return line[9:].strip('"')
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read hostname: %s" % str(err))
        return None

    def set_permanent_hostname(self, name):
        """Replace the hostname= line in /etc/rc.conf.d/hostname with *name*."""
        try:
            with open(self.HOSTNAME_FILE, 'r') as f:
                lines = [x.strip() for x in f]
            for i, line in enumerate(lines):
                if line.startswith('hostname='):
                    lines[i] = 'hostname="%s"' % name
                    break
            with open(self.HOSTNAME_FILE, 'w') as f:
                f.write('\n'.join(lines) + '\n')
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to update hostname: %s" % str(err))
# ===========================================
# Platform/distribution -> strategy wiring. Each subclass only declares the
# (platform, distribution) pair that Hostname's platform-subclass lookup
# matches against, plus the strategy implementation to use for it.

class FedoraHostname(Hostname):
    platform = 'Linux'
    distribution = 'Fedora'
    strategy_class = SystemdStrategy


class SLESHostname(Hostname):
    platform = 'Linux'
    # Trailing space is deliberate: it must match the distribution string
    # exactly as reported by the facts code.
    distribution = 'Suse linux enterprise server '
    # Evaluated once at import time: SLES 10-12 use the classic mechanism
    # handled by SLESStrategy; anything else is explicitly unsupported.
    distribution_version = get_distribution_version()
    if distribution_version and LooseVersion("10") <= LooseVersion(distribution_version) <= LooseVersion("12"):
        strategy_class = SLESStrategy
    else:
        strategy_class = UnimplementedStrategy


class OpenSUSEHostname(Hostname):
    platform = 'Linux'
    distribution = 'Opensuse '
    strategy_class = SystemdStrategy


class ArchHostname(Hostname):
    platform = 'Linux'
    distribution = 'Arch'
    strategy_class = SystemdStrategy


class RedHat5Hostname(Hostname):
    platform = 'Linux'
    distribution = 'Redhat'
    strategy_class = RedHatStrategy


class RedHatServerHostname(Hostname):
    platform = 'Linux'
    distribution = 'Red hat enterprise linux server'
    strategy_class = RedHatStrategy


class RedHatWorkstationHostname(Hostname):
    platform = 'Linux'
    distribution = 'Red hat enterprise linux workstation'
    strategy_class = RedHatStrategy


class CentOSHostname(Hostname):
    platform = 'Linux'
    distribution = 'Centos'
    strategy_class = RedHatStrategy


class CentOSLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Centos linux'
    strategy_class = RedHatStrategy


class ScientificHostname(Hostname):
    platform = 'Linux'
    distribution = 'Scientific'
    strategy_class = RedHatStrategy


class ScientificLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Scientific linux'
    strategy_class = RedHatStrategy


class ScientificLinuxCERNHostname(Hostname):
    platform = 'Linux'
    distribution = 'Scientific linux cern slc'
    strategy_class = RedHatStrategy


class OracleLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Oracle linux server'
    strategy_class = RedHatStrategy


class AmazonLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Amazon'
    strategy_class = RedHatStrategy


class DebianHostname(Hostname):
    platform = 'Linux'
    distribution = 'Debian'
    strategy_class = DebianStrategy


class KaliHostname(Hostname):
    platform = 'Linux'
    distribution = 'Kali'
    strategy_class = DebianStrategy


class UbuntuHostname(Hostname):
    platform = 'Linux'
    distribution = 'Ubuntu'
    strategy_class = DebianStrategy


class LinuxmintHostname(Hostname):
    platform = 'Linux'
    distribution = 'Linuxmint'
    strategy_class = DebianStrategy


class LinaroHostname(Hostname):
    platform = 'Linux'
    distribution = 'Linaro'
    strategy_class = DebianStrategy


class GentooHostname(Hostname):
    platform = 'Linux'
    distribution = 'Gentoo base system'
    strategy_class = OpenRCStrategy


class ALTLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Altlinux'
    strategy_class = RedHatStrategy


class AlpineLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Alpine'
    strategy_class = AlpineStrategy


# Non-Linux platforms match on platform alone (distribution is None).

class OpenBSDHostname(Hostname):
    platform = 'OpenBSD'
    distribution = None
    strategy_class = OpenBSDStrategy


class SolarisHostname(Hostname):
    platform = 'SunOS'
    distribution = None
    strategy_class = SolarisStrategy


class FreeBSDHostname(Hostname):
    platform = 'FreeBSD'
    distribution = None
    strategy_class = FreeBSDStrategy


class NetBSDHostname(Hostname):
    platform = 'NetBSD'
    distribution = None
    # NetBSD uses the same rc.conf.d mechanism as FreeBSD.
    strategy_class = FreeBSDStrategy
# ===========================================
def main():
    """Module entry point: apply the requested hostname and report facts."""
    # The only module argument is the desired hostname.
    module = AnsibleModule(
        argument_spec = dict(
            name=dict(required=True)
        )
    )

    # Hostname's factory machinery picks the platform-specific subclass /
    # strategy for the host we are running on.
    hostname = Hostname(module)
    name = module.params['name']

    changed = hostname.update_current_and_permanent_hostname()

    # Publish derived facts so later tasks see the new name without having
    # to re-run fact gathering.
    module.exit_json(changed=changed, name=name,
                     ansible_facts=dict(ansible_hostname=name.split('.')[0],
                                        ansible_nodename=name,
                                        ansible_fqdn=socket.getfqdn(),
                                        ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))


if __name__ == '__main__':
    main()
| gpl-3.0 |
spothero/sbo-selenium | setup.py | 1 | 1689 | #!/usr/bin/env python
import os

from setuptools import setup, find_packages

root_dir = os.path.abspath(os.path.dirname(__file__))
requirements_path = os.path.join(root_dir, 'requirements', 'base.txt')


def _read_requirements(path):
    """Return the requirement strings listed in a pip requirements file.

    The previous implementation imported pip's private API (pip.download,
    pip.index, pip.req), which was removed in pip 10 and was never a
    supported interface; parsing the file directly keeps this setup.py
    working with any pip version.
    """
    requirements = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            # Skip blanks, comments, and option lines such as "-r other.txt".
            if not line or line.startswith('#') or line.startswith('-'):
                continue
            requirements.append(line)
    return requirements


install_requires = _read_requirements(requirements_path)

version = '0.4.4'  # Remember to update docs/CHANGELOG.rst when this changes

setup(
    name="sbo-selenium",
    version=version,
    packages=find_packages(),
    zip_safe=False,
    description="Selenium testing framework for Django applications",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development',
        'Topic :: Software Development :: Testing',
    ],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords='',
    author='Jeremy Bowman',
    author_email='jbowman@safaribooksonline.com',
    url='https://github.com/safarijv/sbo-selenium',
    package_data={
        'sbo_selenium': [
            'static/js/*.js',
        ],
    },
    install_requires=install_requires,
)
| bsd-2-clause |
grumpa/work_evid | django_project/work_evid/urls.py | 1 | 1756 | from django.conf.urls import include, url
import django.contrib.auth.views
from work_evid import views
urlpatterns = [
    # Reporting and bulk actions (function-based views).
    url(r'^overviews/$', views.overviews, name='overviews'),
    url(r'^delete_work/$', views.delete_work, name='delete_work'),

    # Work CRUD (class-based views).
    url(r'^work/$', views.WorkList.as_view(), name='work_list'),
    url(r'^work/add/$', views.WorkCreate.as_view(), name='work_create'),
    url(r'^work/detail/(?P<pk>\d+)/$', views.WorkDetail.as_view(), name='work_detail'),
    url(r'^work/update/(?P<pk>\d+)/$', views.WorkUpdate.as_view(), name='work_update'),
    url(r'^work/delete/(?P<pk>\d+)/$', views.WorkDelete.as_view(), name='work_delete'),

    # Firm CRUD.
    url(r'^firm/$', views.FirmList.as_view(), name='firm_list'),
    url(r'^firm/add/$', views.FirmCreate.as_view(), name='firm_create'),
    url(r'^firm/detail/(?P<pk>\d+)/$', views.FirmDetail.as_view(), name='firm_detail'),
    url(r'^firm/update/(?P<pk>\d+)/$', views.FirmUpdate.as_view(), name='firm_update'),
    url(r'^firm/delete/(?P<pk>\d+)/$', views.FirmDelete.as_view(), name='firm_delete'),

    # Todo CRUD; the list view can optionally be filtered by firm id.
    url(r'^todo/$', views.TodoList.as_view(), name='todo_list'),
    url(r'^todo/(?P<firm>\d+)/$', views.TodoList.as_view(), name='todo_list_firm'),
    url(r'^todo/add/$', views.TodoCreate.as_view(), name='todo_create'),
    url(r'^todo/detail/(?P<pk>\d+)/$', views.TodoDetail.as_view(), name='todo_detail'),
    url(r'^todo/update/(?P<pk>\d+)/$', views.TodoUpdate.as_view(), name='todo_update'),
    url(r'^todo/delete/(?P<pk>\d+)/$', views.TodoDelete.as_view(), name='todo_delete'),

    # NOTE(review): django.contrib.auth.views.login/logout are the old
    # function-based auth views, removed in Django 2.1 in favour of
    # LoginView/LogoutView. Fine on Django <= 1.11 — confirm the project's
    # Django version before upgrading.
    url(r'^accounts/login/$', django.contrib.auth.views.login, name='login'),
    url(r'^accounts/logout/$', django.contrib.auth.views.logout, name='logout'),

    # Default landing page: the work list.
    url(r'^$', views.WorkList.as_view(), name='index'),
]
| bsd-3-clause |
cloudbau/cinder | cinder/tests/test_HpSanISCSIDriver.py | 2 | 16389 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.san.hp_lefthand import HpSanISCSIDriver
LOG = logging.getLogger(__name__)
class HpSanISCSITestCase(test.TestCase):
    """Tests for HpSanISCSIDriver with the CLIQ transport fully stubbed.

    _fake_cliq_run dispatches on the CLIQ verb and returns canned XML
    responses, so no LeftHand SAN hardware is required.
    """

    def setUp(self):
        # Stub out the CLIQ command runner and iSCSI property discovery
        # before constructing the driver, so the driver never talks to a SAN.
        super(HpSanISCSITestCase, self).setUp()
        self.stubs.Set(HpSanISCSIDriver, "_cliq_run",
                       self._fake_cliq_run)
        self.stubs.Set(HpSanISCSIDriver, "_get_iscsi_properties",
                       self._fake_get_iscsi_properties)
        configuration = mox.MockObject(conf.Configuration)
        configuration.san_is_local = False
        configuration.san_ip = "10.0.0.1"
        configuration.san_login = "foo"
        configuration.san_password = "bar"
        configuration.san_ssh_port = 16022
        configuration.san_clustername = "CloudCluster1"
        configuration.san_thin_provision = True
        configuration.append_config_values(mox.IgnoreArg())

        self.driver = HpSanISCSIDriver(configuration=configuration)
        self.volume_name = "fakevolume"
        self.snapshot_name = "fakeshapshot"
        self.connector = {'ip': '10.0.0.2',
                          'initiator': 'iqn.1993-08.org.debian:01:222',
                          'host': 'fakehost'}
        # NOTE(review): 'target_discoverd' looks like a typo for
        # 'target_discovered', but assertDictMatch compares this dict against
        # the driver's output, so confirm the driver side before renaming.
        self.properties = {
            'target_discoverd': True,
            'target_portal': '10.0.1.6:3260',
            'target_iqn':
            'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
            'volume_id': 1}

    def tearDown(self):
        super(HpSanISCSITestCase, self).tearDown()

    def _fake_get_iscsi_properties(self, volume):
        # Always hand back the canned connection properties from setUp.
        return self.properties

    def _fake_cliq_run(self, verb, cliq_args, check_exit_code=True):
        """Return fake results for the various methods."""

        def create_volume(cliq_args):
            """Create volume CLIQ input for test.

            input = "createVolume description="fake description"
                     clusterName=Cluster01 volumeName=fakevolume
                     thinProvision=0 output=XML size=1GB"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['thinProvision'], '1')
            self.assertEqual(cliq_args['size'], '1GB')
            return output, None

        def delete_volume(cliq_args):
            """Delete volume CLIQ input for test.

            input = "deleteVolume volumeName=fakevolume prompt=false
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="164" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['prompt'], 'false')
            return output, None

        def extend_volume(cliq_args):
            """Extend volume CLIQ input for test.

            input = "modifyVolume description="fake description"
                     volumeName=fakevolume
                     output=XML size=2GB"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['size'], '2GB')
            return output, None

        def assign_volume(cliq_args):
            """Assign volume CLIQ input for test.

            input = "assignVolumeToServer volumeName=fakevolume
                     serverName=fakehost
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="174" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['serverName'], self.connector['host'])
            return output, None

        def unassign_volume(cliq_args):
            """Unassign volume CLIQ input for test.

            input = "unassignVolumeToServer volumeName=fakevolume
                     serverName=fakehost output=XML
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="205" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['serverName'], self.connector['host'])
            return output, None

        def create_snapshot(cliq_args):
            """Create snapshot CLIQ input for test.

            input = "createSnapshot description="fake description"
                     snapshotName=fakesnapshot
                     volumeName=fakevolume
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            return output, None

        def delete_snapshot(cliq_args):
            """Delete shapshot CLIQ input for test.

            input = "deleteSnapshot snapshotName=fakesnapshot prompt=false
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="164" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
            self.assertEqual(cliq_args['prompt'], 'false')
            return output, None

        def create_volume_from_snapshot(cliq_args):
            """Create volume from snapshot CLIQ input for test.

            input = "cloneSnapshot description="fake description"
                     snapshotName=fakesnapshot
                     volumeName=fakevolume
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            return output, None

        def get_cluster_info(cliq_args):
            """Get cluster info CLIQ input for test.

            input = "getClusterInfo clusterName=Cluster01 searchDepth=1
                     verbose=0 output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded." name="CliqSuccess"
                          processingTime="1164" result="0">
                <cluster blockSize="1024" description=""
                         maxVolumeSizeReplication1="622957690"
                         maxVolumeSizeReplication2="311480287"
                         minVolumeSize="262144" name="Cluster01"
                         pageSize="262144" spaceTotal="633697992"
                         storageNodeCount="2" unprovisionedSpace="622960574"
                         useVip="true">
                <nsm ipAddress="10.0.1.7" name="111-vsa"/>
                <nsm ipAddress="10.0.1.8" name="112-vsa"/>
                <vip ipAddress="10.0.1.6" subnetMask="255.255.255.0"/>
                </cluster></response></gauche>"""
            return output, None

        def get_volume_info(cliq_args):
            """Get volume info CLIQ input for test.

            input = "getVolumeInfo volumeName=fakevolume output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded." name="CliqSuccess"
                          processingTime="87" result="0">
                <volume autogrowPages="4" availability="online"
                        blockSize="1024" bytesWritten="0" checkSum="false"
                        clusterName="Cluster01" created="2011-02-08T19:56:53Z"
                        deleting="false" description="" groupName="Group01"
                        initialQuota="536870912" isPrimary="true"
                        iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:fakev"
                        maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
                        minReplication="1" name="vol-b" parity="0" replication="2"
                        reserveQuota="536870912" scratchQuota="4194304"
                        serialNumber="9fa5c8b2cca54b2948a63d8"
                        size="1073741824" stridePages="32" thinProvision="true">
                <status description="OK" value="2"/>
                <permission access="rw" authGroup="api-1"
                            chapName="chapusername" chapRequired="true"
                            id="25369" initiatorSecret="" iqn=""
                            iscsiEnabled="true" loadBalance="true"
                            targetSecret="supersecret"/>
                </volume></response></gauche>"""
            return output, None

        def get_snapshot_info(cliq_args):
            """Get snapshot info CLIQ input for test.

            input = "getSnapshotInfo snapshotName=fakesnapshot output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded." name="CliqSuccess"
                          processingTime="87" result="0">
                <snapshot applicationManaged="false" autogrowPages="32768"
                          automatic="false" availability="online" bytesWritten="0"
                          clusterName="CloudCluster1" created="2013-08-26T07:03:44Z"
                          deleting="false" description="" groupName="CloudGroup1"
                          id="730" initialQuota="536870912" isPrimary="true"
                          iscsiIqn="iqn.2003-10.com.lefthandnetworks:cloudgroup1:73"
                          md5="a64b4f850539c07fb5ce3cee5db1fcce" minReplication="1"
                          name="snapshot-7849288e-e5e8-42cb-9687-9af5355d674b"
                          replication="2" reserveQuota="536870912" scheduleId="0"
                          scratchQuota="4194304" scratchWritten="0"
                          serialNumber="a64b4f850539c07fb5ce3cee5db1fcce"
                          size="2147483648" stridePages="32"
                          volumeSerial="a64b4f850539c07fb5ce3cee5db1fcce">
                <status description="OK" value="2"/>
                <permission access="rw"
                            authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
                            chapName="chapusername" chapRequired="true" id="25369"
                            initiatorSecret="" iqn="" iscsiEnabled="true"
                            loadBalance="true" targetSecret="supersecret"/>
                </snapshot></response></gauche>"""
            return output, None

        def get_server_info(cliq_args):
            """Get server info CLIQ input for test.

            input = "getServerInfo serverName=fakeName"
            """
            output = """<gauche version="1.0"><response result="0"/>
                     </gauche>"""
            return output, None

        def create_server(cliq_args):
            """Create server CLIQ input for test.

            input = "createServer serverName=fakeName initiator=something"
            """
            output = """<gauche version="1.0"><response result="0"/>
                     </gauche>"""
            return output, None

        def test_error(cliq_args):
            # Canned failure response used by test_cliq_error.
            output = """<gauche version="1.0">
                <response description="Volume '134234' not found."
                          name="CliqVolumeNotFound" processingTime="1083"
                          result="8000100c"/>
                </gauche>"""
            return output, None

        self.assertEqual(cliq_args['output'], 'XML')
        # Dispatch table: CLIQ verb -> fake handler defined above. An
        # unknown verb raises KeyError on lookup below (the except only
        # guards the table construction).
        try:
            verbs = {'createVolume': create_volume,
                     'deleteVolume': delete_volume,
                     'modifyVolume': extend_volume,
                     'assignVolumeToServer': assign_volume,
                     'unassignVolumeToServer': unassign_volume,
                     'createSnapshot': create_snapshot,
                     'deleteSnapshot': delete_snapshot,
                     'cloneSnapshot': create_volume_from_snapshot,
                     'getClusterInfo': get_cluster_info,
                     'getVolumeInfo': get_volume_info,
                     'getSnapshotInfo': get_snapshot_info,
                     'getServerInfo': get_server_info,
                     'createServer': create_server,
                     'testError': test_error}
        except KeyError:
            raise NotImplementedError()

        return verbs[verb](cliq_args)

    def test_create_volume(self):
        volume = {'name': self.volume_name, 'size': 1}
        model_update = self.driver.create_volume(volume)
        expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
        expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
        self.assertEqual(model_update['provider_location'], expected_location)

    def test_delete_volume(self):
        volume = {'name': self.volume_name}
        self.driver.delete_volume(volume)

    def test_extend_volume(self):
        volume = {'name': self.volume_name}
        self.driver.extend_volume(volume, 2)

    def test_initialize_connection(self):
        volume = {'name': self.volume_name}
        result = self.driver.initialize_connection(volume, self.connector)
        self.assertEqual(result['driver_volume_type'], 'iscsi')
        self.assertDictMatch(result['data'], self.properties)

    def test_terminate_connection(self):
        volume = {'name': self.volume_name}
        self.driver.terminate_connection(volume, self.connector)

    def test_create_snapshot(self):
        snapshot = {'name': self.snapshot_name,
                    'volume_name': self.volume_name}
        self.driver.create_snapshot(snapshot)

    def test_delete_snapshot(self):
        snapshot = {'name': self.snapshot_name}
        self.driver.delete_snapshot(snapshot)

    def test_create_volume_from_snapshot(self):
        volume = {'name': self.volume_name}
        snapshot = {'name': self.snapshot_name}
        model_update = self.driver.create_volume_from_snapshot(volume,
                                                               snapshot)
        expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
        expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
        self.assertEqual(model_update['provider_location'], expected_location)

    def test_cliq_error(self):
        # The non-zero CLIQ result code must surface as a backend exception.
        try:
            self.driver._cliq_run_xml("testError", {})
        except exception.VolumeBackendAPIException:
            pass
| apache-2.0 |
aidanhs/servo | tests/wpt/harness/wptrunner/wptmanifest/backends/static.py | 190 | 6645 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import operator
from ..node import NodeVisitor
from ..parser import parse
class Compiler(NodeVisitor):
    """Compiler backend that evaluates conditional expressions
    to give static output"""

    def compile(self, tree, expr_data, data_cls_getter=None, **kwargs):
        """Compile a raw AST into a form with conditional expressions
        evaluated.

        tree - The root node of the wptmanifest AST to compile

        expr_data - A dictionary of key / value pairs to use when
                    evaluating conditional expressions

        data_cls_getter - A function taking two parameters; the previous
                          output node and the current ast node and returning
                          the class of the output node to use for the current
                          ast node
        """
        self._kwargs = kwargs
        self.expr_data = expr_data

        if data_cls_getter is None:
            # Default: every AST data node becomes a plain ManifestItem.
            self.data_cls_getter = lambda x, y: ManifestItem
        else:
            self.data_cls_getter = data_cls_getter

        self.output_node = None
        self.visit(tree)
        return self.output_node

    def visit_DataNode(self, node):
        # Push a new output node, visit the children into it, then pop back
        # to the parent, attaching the finished node to it.
        output_parent = self.output_node
        if self.output_node is None:
            assert node.parent is None
            self.output_node = self.data_cls_getter(None, None)(None, **self._kwargs)
        else:
            self.output_node = self.data_cls_getter(self.output_node, node)(node.data)

        for child in node.children:
            self.visit(child)

        if output_parent is not None:
            output_parent.append(self.output_node)
            self.output_node = self.output_node.parent

    def visit_KeyValueNode(self, node):
        # The first child yielding a non-None value wins (conditional
        # children return None when their condition is false).
        key_name = node.data
        key_value = None
        for child in node.children:
            value = self.visit(child)
            if value is not None:
                key_value = value
                break
        if key_value is not None:
            self.output_node.set(key_name, key_value)

    def visit_ValueNode(self, node):
        return node.data

    def visit_AtomNode(self, node):
        return node.data

    def visit_ListNode(self, node):
        return [self.visit(child) for child in node.children]

    def visit_ConditionalNode(self, node):
        # children[0] is the condition expression, children[1] the value;
        # returns None (implicitly) when the condition is false.
        assert len(node.children) == 2
        if self.visit(node.children[0]):
            return self.visit(node.children[1])

    def visit_StringNode(self, node):
        # Children are transformations (e.g. index lookups) applied in order.
        value = node.data
        for child in node.children:
            value = self.visit(child)(value)
        return value

    def visit_NumberNode(self, node):
        if "." in node.data:
            return float(node.data)
        else:
            return int(node.data)

    def visit_VariableNode(self, node):
        # Variables resolve against the expr_data mapping supplied to
        # compile(); children apply further transformations.
        value = self.expr_data[node.data]
        for child in node.children:
            value = self.visit(child)(value)
        return value

    def visit_IndexNode(self, node):
        # Returns a callable performing the subscript, applied by the parent.
        assert len(node.children) == 1
        index = self.visit(node.children[0])
        return lambda x: x[index]

    def visit_UnaryExpressionNode(self, node):
        assert len(node.children) == 2
        operator = self.visit(node.children[0])
        operand = self.visit(node.children[1])

        return operator(operand)

    def visit_BinaryExpressionNode(self, node):
        assert len(node.children) == 3
        operator = self.visit(node.children[0])
        operand_0 = self.visit(node.children[1])
        operand_1 = self.visit(node.children[2])

        return operator(operand_0, operand_1)

    def visit_UnaryOperatorNode(self, node):
        return {"not": operator.not_}[node.data]

    def visit_BinaryOperatorNode(self, node):
        return {"and": operator.and_,
                "or": operator.or_,
                "==": operator.eq,
                "!=": operator.ne}[node.data]
class ManifestItem(object):
    """A node in the compiled (static) manifest tree.

    Key lookups consult the node's own data first and then fall back to
    the root of the tree, so values set on the root act as tree-wide
    defaults.
    """

    def __init__(self, name, **kwargs):
        self.parent = None
        self.name = name
        self.children = []
        self._data = {}

    def __repr__(self):
        return "<ManifestItem %s>" % (self.name)

    def __str__(self):
        lines = [repr(self)]
        for child in self.children:
            lines.extend(" %s" % part for part in str(child).split("\n"))
        return "\n".join(lines)

    @property
    def is_empty(self):
        # Empty means: no data on this node and no data anywhere below it.
        return not self._data and all(child.is_empty for child in self.children)

    @property
    def root(self):
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def has_key(self, key):
        return any(key in node._data for node in (self, self.root))

    def get(self, key):
        for node in (self, self.root):
            try:
                return node._data[key]
            except KeyError:
                continue
        raise KeyError

    def set(self, name, value):
        self._data[name] = value

    def remove(self):
        if self.parent:
            self.parent._remove_child(self)

    def _remove_child(self, child):
        self.children.remove(child)
        child.parent = None

    def iterchildren(self, name=None):
        for child in self.children:
            if name is None or child.name == name:
                yield child

    def _flatten(self):
        # Merge node-local data over root data; the first writer (self) wins.
        merged = {}
        for node in (self, self.root):
            for key, value in node._data.iteritems():
                merged.setdefault(key, value)
        return merged

    def iteritems(self):
        for pair in self._flatten().iteritems():
            yield pair

    def iterkeys(self):
        for key in self._flatten().iterkeys():
            yield key

    def itervalues(self):
        for value in self._flatten().itervalues():
            yield value

    def append(self, child):
        child.parent = self
        self.children.append(child)
        return child
def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs):
    """Compile an already-parsed wptmanifest AST into static output.

    ast - root node produced by the wptmanifest parser
    expr_data - mapping used when evaluating conditional expressions
    data_cls_getter - optional callable choosing the output node class
    """
    return Compiler().compile(ast,
                              expr_data,
                              data_cls_getter=data_cls_getter,
                              **kwargs)


# NOTE: deliberately shadows the built-in compile(); this is the module's
# public entry point and is imported by name elsewhere.
def compile(stream, expr_data, data_cls_getter=None, **kwargs):
    """Parse *stream* and compile it; see compile_ast for the parameters."""
    return compile_ast(parse(stream),
                       expr_data,
                       data_cls_getter=data_cls_getter,
                       **kwargs)
| mpl-2.0 |
Filechaser/nzbToMedia | core/autoProcess/autoProcessMovie.py | 2 | 23343 | # coding=utf-8
import os
import time
import requests
import json
import core
from core.nzbToMediaSceneExceptions import process_all_exceptions
from core.nzbToMediaUtil import convert_to_ascii, rmDir, find_imdbid, find_download, listMediaFiles, remoteDir, import_subs, server_responding, reportNzb
from core import logger
from core.transcoder import transcoder
requests.packages.urllib3.disable_warnings()
class autoProcessMovie(object):
def get_release(self, baseURL, imdbid=None, download_id=None, release_id=None):
results = {}
params = {}
# determine cmd and params to send to CouchPotato to get our results
section = 'movies'
cmd = "media.list"
if release_id or imdbid:
section = 'media'
cmd = "media.get"
params['id'] = release_id or imdbid
url = "{0}{1}".format(baseURL, cmd)
logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params))
try:
r = requests.get(url, params=params, verify=False, timeout=(30, 60))
except requests.ConnectionError:
logger.error("Unable to open URL {0}".format(url))
return results
try:
result = r.json()
except ValueError:
# ValueError catches simplejson's JSONDecodeError and json's ValueError
logger.error("CouchPotato returned the following non-json data")
for line in r.iter_lines():
logger.error("{0}".format(line))
return results
if not result['success']:
if 'error' in result:
logger.error('{0}'.format(result['error']))
else:
logger.error("no media found for id {0}".format(params['id']))
return results
# Gather release info and return it back, no need to narrow results
if release_id:
try:
id = result[section]['_id']
results[id] = result[section]
return results
except:
pass
# Gather release info and proceed with trying to narrow results to one release choice
movies = result[section]
if not isinstance(movies, list):
movies = [movies]
for movie in movies:
if movie['status'] not in ['active', 'done']:
continue
releases = movie['releases']
for release in releases:
try:
if release['status'] not in ['snatched', 'downloaded', 'done']:
continue
if download_id:
if download_id.lower() != release['download_info']['id'].lower():
continue
id = release['_id']
results[id] = release
except:
continue
# Narrow results by removing old releases by comparing their last_edit field
if len(results) > 1:
for id1, x1 in results.items():
for id2, x2 in results.items():
try:
if x2["last_edit"] > x1["last_edit"]:
results.pop(id1)
except:
continue
# Search downloads on clients for a match to try and narrow our results down to 1
if len(results) > 1:
for id, x in results.items():
try:
if not find_download(str(x['download_info']['downloader']).lower(), x['download_info']['id']):
results.pop(id)
except:
continue
return results
def command_complete(self, url, params, headers, section):
try:
r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60))
except requests.ConnectionError:
logger.error("Unable to open URL: {0}".format(url), section)
return None
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error("Server returned status {0}".format(r.status_code), section)
return None
else:
try:
return r.json()['state']
except (ValueError, KeyError):
# ValueError catches simplejson's JSONDecodeError and json's ValueError
logger.error("{0} did not return expected json data.".format(section), section)
return None
def CDH(self, url2, headers, section="MAIN"):
try:
r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
except requests.ConnectionError:
logger.error("Unable to open URL: {0}".format(url2), section)
return False
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error("Server returned status {0}".format(r.status_code), section)
return False
else:
try:
return r.json().get("enableCompletedDownloadHandling", False)
except ValueError:
# ValueError catches simplejson's JSONDecodeError and json's ValueError
return False
    def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", download_id="", inputCategory=None, failureLink=None):
        """Post-process a completed (or failed) movie download for CouchPotato or Radarr.

        :param section: config section name, "CouchPotato" or "Radarr"
        :param dirName: download directory (or single file) to process
        :param inputName: original release / nzb name
        :param status: downloader-reported status; 0 = success, non-zero = failed
        :param clientAgent: name of the download client, or "manual"
        :param download_id: downloader-assigned id used to match a release
        :param inputCategory: per-category config key under *section*
        :param failureLink: optional URL used to report failed downloads
        :returns: ``[exit_code, message]`` where exit_code 0 means success
        """
        # --- read per-category configuration ---
        cfg = dict(core.CFG[section][inputCategory])
        host = cfg["host"]
        port = cfg["port"]
        apikey = cfg["apikey"]
        if section == "CouchPotato":
            method = cfg["method"]
        else:
            method = None
        delete_failed = int(cfg["delete_failed"])
        wait_for = int(cfg["wait_for"])
        ssl = int(cfg.get("ssl", 0))
        web_root = cfg.get("web_root", "")
        remote_path = int(cfg.get("remote_path", 0))
        protocol = "https://" if ssl else "http://"
        omdbapikey = cfg.get("omdbapikey", "")
        status = int(status)
        # Skip extraction of failed downloads when globally disabled.
        if status > 0 and core.NOEXTRACTFAILED:
            extract = 0
        else:
            extract = int(cfg.get("extract", 0))
        imdbid = find_imdbid(dirName, inputName, omdbapikey)
        # Build the API base URL for the configured backend.
        if section == "CouchPotato":
            baseURL = "{0}{1}:{2}{3}/api/{4}/".format(protocol, host, port, web_root, apikey)
        if section == "Radarr":
            baseURL = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root)
            url2 = "{0}{1}:{2}{3}/api/config/downloadClient".format(protocol, host, port, web_root)
        headers = {'X-Api-Key': apikey}
        if not apikey:
            logger.info('No CouchPotato or Radarr apikey entered. Performing transcoder functions only')
            release = None
        elif server_responding(baseURL):
            if section == "CouchPotato":
                release = self.get_release(baseURL, imdbid, download_id)
            else:
                release = None
        else:
            logger.error("Server did not respond. Exiting", section)
            return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)]
        # pull info from release found if available
        release_id = None
        media_id = None
        downloader = None
        release_status_old = None
        if release:
            try:
                # NOTE(review): dict.keys()[0] only works on Python 2; under
                # Python 3 this raises TypeError and is silently swallowed by
                # the bare except below — confirm target interpreter version.
                release_id = release.keys()[0]
                media_id = release[release_id]['media_id']
                download_id = release[release_id]['download_info']['id']
                downloader = release[release_id]['download_info']['downloader']
                release_status_old = release[release_id]['status']
            except:
                pass
        if not os.path.isdir(dirName) and os.path.isfile(dirName):  # If the input directory is a file, assume single file download and split dir/name.
            dirName = os.path.split(os.path.normpath(dirName))[0]
        # If a sub-directory named after the input exists, descend into it.
        SpecificPath = os.path.join(dirName, str(inputName))
        cleanName = os.path.splitext(SpecificPath)
        if cleanName[1] == ".nzb":
            SpecificPath = cleanName[0]
        if os.path.isdir(SpecificPath):
            dirName = SpecificPath
        process_all_exceptions(inputName, dirName)
        inputName, dirName = convert_to_ascii(inputName, dirName)
        # Extract archives only when no playable media is present yet.
        if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
            logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
            core.extractFiles(dirName)
            inputName, dirName = convert_to_ascii(inputName, dirName)
        good_files = 0
        num_files = 0
        # Check video files for corruption
        for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
            num_files += 1
            if transcoder.isVideoGood(video, status):
                import_subs(video)
                good_files += 1
        # Reconcile downloader-reported status with what we actually found.
        if num_files and good_files == num_files:
            if status:
                logger.info("Status shown as failed from Downloader, but {0} valid video files found. Setting as success.".format(good_files), section)
                status = 0
        elif num_files and good_files < num_files:
            logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section)
            # NZBGet >= 14.0 understands the [NZB] MARK=BAD command on stdout.
            if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                print('[NZB] MARK=BAD')
            if failureLink:
                failureLink += '&corrupt=true'
            status = 1
        elif clientAgent == "manual":
            logger.warning("No media files found in directory {0} to manually process.".format(dirName), section)
            return [0, ""]  # Success (as far as this script is concerned)
        else:
            logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section)
            status = 1
            if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                print('[NZB] MARK=BAD')
        if status == 0:
            # --- success path: optional transcode, rename, then notify server ---
            if core.TRANSCODE == 1:
                result, newDirName = transcoder.Transcode_directory(dirName)
                if result == 0:
                    logger.debug("Transcoding succeeded for files in {0}".format(dirName), section)
                    dirName = newDirName
                    # chmodDirectory is configured as an octal string, e.g. "0775".
                    chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8)
                    logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section)
                    if chmod_directory:
                        logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section)
                        core.rchmod(dirName, chmod_directory)
                else:
                    logger.error("Transcoding failed for files in {0}".format(dirName), section)
                    return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]
            # Tag unknown videos with ".cp(ttXXXXXXX)" so CouchPotato can match them.
            for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
                if not release and ".cp(tt" not in video and imdbid:
                    videoName, videoExt = os.path.splitext(video)
                    video2 = "{0}.cp({1}){2}".format(videoName, imdbid, videoExt)
                    if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'):
                        logger.debug('Renaming: {0} to: {1}'.format(video, video2))
                        os.rename(video, video2)
            if not apikey:  # If only using Transcoder functions, exit here.
                logger.info('No CouchPotato or Radarr apikey entered. Processing completed.')
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            params = {}
            if download_id:
                params['downloader'] = downloader or clientAgent
                params['download_id'] = download_id
            params['media_folder'] = remoteDir(dirName) if remote_path else dirName
            if section == "CouchPotato":
                if method == "manage":
                    command = "manage.update"
                    params = {}
                else:
                    command = "renamer.scan"
                url = "{0}{1}".format(baseURL, command)
                logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section)
                logger.postprocess("Starting {0} scan for {1}".format(method, inputName), section)
            if section == "Radarr":
                payload = {'name': 'DownloadedMoviesScan', 'path': params['media_folder'], 'downloadClientId': download_id}
                if not download_id:
                    payload.pop("downloadClientId")
                logger.debug("Opening URL: {0} with PARAMS: {1}".format(baseURL, payload), section)
                logger.postprocess("Starting DownloadedMoviesScan scan for {0}".format(inputName), section)
            try:
                if section == "CouchPotato":
                    r = requests.get(url, params=params, verify=False, timeout=(30, 1800))
                else:
                    r = requests.post(baseURL, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800))
            except requests.ConnectionError:
                logger.error("Unable to open URL", section)
                return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]
            result = r.json()
            if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                logger.error("Server returned status {0}".format(r.status_code), section)
                return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
            elif section == "CouchPotato" and result['success']:
                logger.postprocess("SUCCESS: Finished {0} scan for folder {1}".format(method, dirName), section)
                if method == "manage":
                    return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            elif section == "Radarr":
                logger.postprocess("Radarr response: {0}".format(result['state']))
                try:
                    # Radarr returns a command record; remember its id so we
                    # can poll for completion further below.
                    res = json.loads(r.content)
                    scan_id = int(res['id'])
                    logger.debug("Scan started with id: {0}".format(scan_id), section)
                    Started = True
                except Exception as e:
                    logger.warning("No scan id was returned due to: {0}".format(e), section)
                    scan_id = None
            else:
                logger.error("FAILED: {0} scan was unable to finish for folder {1}. exiting!".format(method, dirName),
                             section)
                return [1, "{0}: Failed to post-process - Server did not return success".format(section)]
        else:
            # --- failure path: report, clean up, and try the next release ---
            core.FAILED = True
            logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(inputName), section)
            if failureLink:
                reportNzb(failureLink, clientAgent)
            if section == "Radarr":
                logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section)
                return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)]  # Return as failed to flag this in the downloader.
            # Never delete a filesystem root (dirname(dir) == dir only at the root).
            if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName:
                logger.postprocess("Deleting failed files and folder {0}".format(dirName), section)
                rmDir(dirName)
            if not release_id and not media_id:
                logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(inputName),
                             section)
                return [1, "{0}: Failed to post-process - Failed download not found in {1}".format(section, section)]
            if release_id:
                logger.postprocess("Setting failed release {0} to ignored ...".format(inputName), section)
                url = "{url}release.ignore".format(url=baseURL)
                params = {'id': release_id}
                logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section)
                try:
                    r = requests.get(url, params=params, verify=False, timeout=(30, 120))
                except requests.ConnectionError:
                    logger.error("Unable to open URL {0}".format(url), section)
                    return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]
                result = r.json()
                if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                    logger.error("Server returned status {0}".format(r.status_code), section)
                    return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
                elif result['success']:
                    logger.postprocess("SUCCESS: {0} has been set to ignored ...".format(inputName), section)
                else:
                    logger.warning("FAILED: Unable to set {0} to ignored!".format(inputName), section)
                    return [1, "{0}: Failed to post-process - Unable to set {1} to ignored".format(section, inputName)]
            logger.postprocess("Trying to snatch the next highest ranked release.", section)
            url = "{0}movie.searcher.try_next".format(baseURL)
            logger.debug("Opening URL: {0}".format(url), section)
            try:
                r = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600))
            except requests.ConnectionError:
                logger.error("Unable to open URL {0}".format(url), section)
                return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)]
            result = r.json()
            if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
                logger.error("Server returned status {0}".format(r.status_code), section)
                return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
            elif result['success']:
                logger.postprocess("SUCCESS: Snatched the next highest release ...", section)
                return [0, "{0}: Successfully snatched next highest release".format(section)]
            else:
                logger.postprocess("SUCCESS: Unable to find a new release to snatch now. CP will keep searching!", section)
                return [0, "{0}: No new release found now. {1} will keep searching".format(section, section)]
        # Added a release that was not in the wanted list so confirm rename successful by finding this movie media.list.
        if not release:
            download_id = None  # we don't want to filter new releases based on this.
        # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
        timeout = time.time() + 60 * wait_for
        while time.time() < timeout:  # only wait 2 (default) minutes, then return.
            logger.postprocess("Checking for status change, please stand by ...", section)
            if section == "CouchPotato":
                release = self.get_release(baseURL, imdbid, download_id, release_id)
                scan_id = None
            else:
                release = None
            if release:
                try:
                    if release_id is None and release_status_old is None:  # we didn't have a release before, but now we do.
                        logger.postprocess("SUCCESS: Movie {0} has now been added to CouchPotato".format(imdbid), section)
                        return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
                    release_status_new = release[release_id]['status']
                    if release_status_new != release_status_old:
                        logger.postprocess("SUCCESS: Release {0} has now been marked with a status of [{1}]".format(
                            inputName, str(release_status_new).upper()), section)
                        return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
                except:
                    pass
            elif scan_id:
                # Radarr: poll the command endpoint for the earlier scan's state.
                url = "{0}/{1}".format(baseURL, scan_id)
                command_status = self.command_complete(url, params, headers, section)
                if command_status:
                    logger.debug("The Scan command return status: {0}".format(command_status), section)
                    if command_status in ['completed']:
                        logger.debug("The Scan command has completed successfully. Renaming was successful.", section)
                        return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
                    elif command_status in ['failed']:
                        logger.debug("The Scan command has failed. Renaming was not successful.", section)
                        # return [1, "%s: Failed to post-process %s" % (section, inputName) ]
            if not os.path.isdir(dirName):
                logger.postprocess("SUCCESS: Input Directory [{0}] has been processed and removed".format(
                    dirName), section)
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            elif not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=True):
                logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.".format(
                    dirName), section)
                return [0, "{0}: Successfully post-processed {1}".format(section, inputName)]
            # pause and let CouchPotatoServer/Radarr catch its breath
            time.sleep(10 * wait_for)
        # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now.
        if section == "Radarr" and self.CDH(url2, headers, section=section):
            logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
            return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
        logger.warning(
            "{0} does not appear to have changed status after {1} minutes, Please check your logs.".format(inputName, wait_for),
            section)
        return [1, "{0}: Failed to post-process - No change in status".format(section)]
| gpl-3.0 |
hirochachacha/apython | bpython/completion/completers/get_item_completer.py | 1 | 1344 | #!/usr/bin/env python
#coding: utf-8
import collections
import re
from six import PY3
from bpython.util import isolate, debug, getpreferredencoding
@isolate
def complete(expr, attr, locals_):
    """Return subscript-completion candidates for ``expr[attr...``.

    :param expr: source text of the object being subscripted
    :param attr: the (partial) text typed inside the brackets
    :param locals_: namespace in which *expr* is evaluated
    :returns: list of candidate completions (each ending with ``]``),
        or ``[]`` when nothing sensible can be offered
    """
    if not expr:
        return []
    try:
        # NOTE(review): eval of user-typed REPL text — acceptable here only
        # because this runs inside the user's own interactive session.
        obj = eval(expr, locals_)
    except Exception:
        return []
    else:
        try:
            # Fuzzy subsequence match: join the typed characters with '.*'
            # so e.g. "ab" matches any key containing 'a' then 'b'. The
            # characters are not re.escape()d, so a typed regex
            # metacharacter either widens the match or raises re.error,
            # which is swallowed below.
            pattern = re.compile(r'.*%s.*' % '.*'.join(list(attr)))
            if isinstance(obj, collections.Mapping):
                # Mappings: offer quoted key literals, e.g. '"key"]'.
                words = sorted(key_wrap(word) for word in obj.keys())
                return [word for word in words if pattern.search(word)]
            elif isinstance(obj, collections.Sequence):
                # Sequences: offer every valid integer index, e.g. '0]'.
                words = (str(word) + ']' for word in range(len(obj)))
                return [word for word in words if pattern.search(word)]
            else:
                return []
        except (re.error, TypeError):
            # TypeError covers non-string keys reaching sorted()/search().
            # NOTE(review): collections.Mapping/Sequence moved to
            # collections.abc in Python 3.3 and the old aliases were removed
            # in 3.10 — confirm the supported interpreter range.
            return []
def key_wrap(obj):
    """Render a mapping key as the literal text completing a subscript.

    String-like keys become a quoted literal followed by a closing bracket
    (with a ``b``/``u`` prefix for bytes/unicode keys as appropriate for the
    running Python); any other key type is returned unchanged.
    """
    if PY3:
        if isinstance(obj, str):
            return '"' + obj + '"]'
        if isinstance(obj, bytes):
            # Decode using the user's preferred encoding for display.
            return 'b"' + obj.decode(getpreferredencoding()) + '"]'
        return obj
    if isinstance(obj, str):
        return '"' + obj + '"]'
    if isinstance(obj, unicode):
        return 'u"' + str(obj) + '"]'
    return obj
| mit |
LockScreen/Backend | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py | 484 | 10454 | from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.
    :param maxsize:
        Maximum number of recent elements to retain.
    :param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called with the evicted value.
    """
    # OrderedDict insertion order doubles as the recency order: the front of
    # the dict is the least-recently-used entry.
    ContainerCls = OrderedDict
    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = self.ContainerCls()
        self.lock = RLock()
    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item
    def __setitem__(self, key, value):
        evicted_value = _Null
        with self.lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value
            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)
        # Dispose outside the lock so a slow callback can't block other threads.
        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)
    def __delitem__(self, key):
        # Remove the entry, then dispose of its value if a callback is set.
        with self.lock:
            value = self._container.pop(key)
        if self.dispose_func:
            self.dispose_func(value)
    def __len__(self):
        with self.lock:
            return len(self._container)
    def __iter__(self):
        # A live iterator could observe concurrent mutation; use keys() for a snapshot.
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
    def clear(self):
        """Empty the container, disposing of every stored value."""
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            values = list(itervalues(self._container))
            self._container.clear()
        if self.dispose_func:
            for value in values:
                self.dispose_func(value)
    def keys(self):
        """Return a snapshot list of the stored keys (thread-safe)."""
        with self.lock:
            return list(iterkeys(self._container))
# Bind the raw dict methods once so HTTPHeaderDict can bypass its own
# case-folding overrides when touching the underlying storage directly.
_dict_setitem = dict.__setitem__
_dict_getitem = dict.__getitem__
_dict_delitem = dict.__delitem__
_dict_contains = dict.__contains__
_dict_setdefault = dict.setdefault
class HTTPHeaderDict(dict):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.
    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.
    A ``dict`` like container for storing HTTP Headers.
    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.
    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.
    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.
    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """
    # Storage layout: the underlying dict maps a lowercased field name to
    # either a (original_key, value) tuple, or — once duplicates have been
    # add()ed — a list [original_key, value1, value2, ...].
    def __init__(self, headers=None, **kwargs):
        dict.__init__(self)
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)
    def __setitem__(self, key, val):
        # Overwrites all existing values for this field (case-insensitive).
        return _dict_setitem(self, key.lower(), (key, val))
    def __getitem__(self, key):
        # val is (original_key, v1[, v2, ...]); join the values per RFC 7230.
        val = _dict_getitem(self, key.lower())
        return ', '.join(val[1:])
    def __delitem__(self, key):
        return _dict_delitem(self, key.lower())
    def __contains__(self, key):
        return _dict_contains(self, key.lower())
    def __eq__(self, other):
        # Equality is case-insensitive on names and compares joined values.
        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
            return False
        if not isinstance(other, type(self)):
            other = type(self)(other)
        return dict((k1, self[k1]) for k1 in self) == dict((k2, other[k2]) for k2 in other)
    def __ne__(self, other):
        return not self.__eq__(other)
    # Reuse the generic MutableMapping implementations, which are built on
    # top of our case-insensitive __getitem__/__setitem__/__delitem__.
    values = MutableMapping.values
    get = MutableMapping.get
    update = MutableMapping.update
    if not PY3:  # Python 2
        iterkeys = MutableMapping.iterkeys
        itervalues = MutableMapping.itervalues
    # Private sentinel so pop() can distinguish "no default given" from
    # an explicit default of None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        # Using the MutableMapping function directly fails due to the private marker.
        # Using ordinary dict.pop would expose the internal structures.
        # So let's reinvent the wheel.
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value
    def discard(self, key):
        """Remove *key* if present; do nothing if it is absent."""
        try:
            del self[key]
        except KeyError:
            pass
    def add(self, key, val):
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.
        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        key_lower = key.lower()
        new_vals = key, val
        # Keep the common case aka no item present as fast as possible
        vals = _dict_setdefault(self, key_lower, new_vals)
        if new_vals is not vals:
            # new_vals was not inserted, as there was a previous one
            if isinstance(vals, list):
                # If already several items got inserted, we have a list
                vals.append(val)
            else:
                # vals should be a tuple then, i.e. only one item so far
                # Need to convert the tuple to list for further extension
                _dict_setitem(self, key_lower, [vals[0], vals[1], val])
    def extend(self, *args, **kwargs):
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError("extend() takes at most 1 positional "
                            "arguments ({} given)".format(len(args)))
        other = args[0] if len(args) >= 1 else ()
        if isinstance(other, HTTPHeaderDict):
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, Mapping):
            for key in other:
                self.add(key, other[key])
        elif hasattr(other, "keys"):
            for key in other.keys():
                self.add(key, other[key])
        else:
            for key, value in other:
                self.add(key, value)
        for key, value in kwargs.items():
            self.add(key, value)
    def getlist(self, key):
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = _dict_getitem(self, key.lower())
        except KeyError:
            return []
        else:
            if isinstance(vals, tuple):
                return [vals[1]]
            else:
                return vals[1:]
    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist
    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
    def _copy_from(self, other):
        # Copy the raw storage of another HTTPHeaderDict, cloning any lists
        # so the two instances don't share mutable duplicate-value state.
        for key in other:
            val = _dict_getitem(other, key)
            if isinstance(val, list):
                # Don't need to convert tuples
                val = list(val)
            _dict_setitem(self, key, val)
    def copy(self):
        """Return an independent copy of this header dict."""
        clone = type(self)()
        clone._copy_from(self)
        return clone
    def iteritems(self):
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = _dict_getitem(self, key)
            for val in vals[1:]:
                yield vals[0], val
    def itermerged(self):
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = _dict_getitem(self, key)
            yield val[0], ', '.join(val[1:])
    def items(self):
        return list(self.iteritems())
    @classmethod
    def from_httplib(cls, message):  # Python 2
        """Read headers from a Python 2 httplib message object."""
        # python2.7 does not expose a proper API for exporting multiheaders
        # efficiently. This function re-reads raw lines from the message
        # object and extracts the multiheaders properly.
        headers = []
        for line in message.headers:
            if line.startswith((' ', '\t')):
                # Continuation line (folded header): append to previous value.
                key, value = headers[-1]
                headers[-1] = (key, value + '\r\n' + line.rstrip())
                continue
            key, value = line.split(':', 1)
            headers.append((key, value.strip()))
        return cls(headers)
| mit |
yonggang985/Sniper | scripts/ipctrace.py | 2 | 2069 | """
ipctrace.py
Write a trace of instantaneous IPC values for all cores.
First argument is either a filename, or none to write to standard output.
Second argument is the interval size in nanoseconds (default is 10000)
"""
import sys, os, sim
class IpcTrace:
  """Sniper scripting hook that periodically writes per-core IPC values.

  NOTE(review): written for Python 2 (`long`, `file`) — confirm the Sniper
  embedded interpreter version before modernizing.
  """
  def setup(self, args):
    """Parse the colon-separated script arguments and install the periodic hook.

    args format: "[filename][:interval_ns]"; no filename means write to
    stdout with an "[IPC]" prefix; the interval defaults to 10000 ns.
    """
    args = dict(enumerate((args or '').split(':')))
    filename = args.get(0, None)
    interval_ns = long(args.get(1, 10000))
    if filename:
      self.fd = file(os.path.join(sim.config.output_dir, filename), 'w')
      self.isTerminal = False
    else:
      self.fd = sys.stdout
      self.isTerminal = True
    # StatsDelta getters yield per-interval deltas of cumulative counters.
    self.sd = sim.util.StatsDelta()
    self.stats = {
      'time': [ self.sd.getter('performance_model', core, 'elapsed_time') for core in range(sim.config.ncores) ],
      'ffwd_time': [ self.sd.getter('fastforward_performance_model', core, 'fastforwarded_time') for core in range(sim.config.ncores) ],
      'instrs': [ self.sd.getter('performance_model', core, 'instruction_count') for core in range(sim.config.ncores) ],
      'coreinstrs': [ self.sd.getter('core', core, 'instructions') for core in range(sim.config.ncores) ],
    }
    # Fire periodic() every interval_ns nanoseconds, only while in ROI.
    sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
  def periodic(self, time, time_delta):
    """Emit one trace line: timestamp (ns) followed by one IPC value per core."""
    if self.isTerminal:
      self.fd.write('[IPC] ')
    self.fd.write('%u' % (time / 1e6)) # Time in ns
    for core in range(sim.config.ncores):
      # detailed-only IPC
      cycles = (self.stats['time'][core].delta - self.stats['ffwd_time'][core].delta) * sim.dvfs.get_frequency(core) / 1e9 # convert fs to cycles
      instrs = self.stats['instrs'][core].delta
      ipc = instrs / (cycles or 1) # Avoid division by zero
      #self.fd.write(' %.3f' % ipc)
      # include fast-forward IPCs
      cycles = self.stats['time'][core].delta * sim.dvfs.get_frequency(core) / 1e9 # convert fs to cycles
      instrs = self.stats['coreinstrs'][core].delta
      ipc = instrs / (cycles or 1)
      self.fd.write(' %.3f' % ipc)
    self.fd.write('\n')
# Instantiate the tracer and hand it to Sniper's script hook registry.
sim.util.register(IpcTrace())
| mit |
pombredanne/kitchen-1 | kitchen3/kitchen/text/converters.py | 1 | 40837 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011-2012 Red Hat, Inc.
#
# kitchen is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# kitchen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with kitchen; if not, see <http://www.gnu.org/licenses/>
#
# Authors:
# Toshio Kuratomi <toshio@fedoraproject.org>
# Seth Vidal
#
# Portions of code taken from yum/i18n.py and
# python-fedora: fedora/textutils.py
'''
Functions to handle conversion of byte :class:`bytes` and :class:`str`
strings.
.. versionchanged:: kitchen 0.2a2 ; API kitchen.text 2.0.0
Added :func:`~kitchen.text.converters.getwriter`
.. versionchanged:: kitchen 0.2.2 ; API kitchen.text 2.1.0
Added :func:`~kitchen.text.converters.exception_to_unicode`,
:func:`~kitchen.text.converters.exception_to_bytes`,
:data:`~kitchen.text.converters.EXCEPTION_CONVERTERS`,
and :data:`~kitchen.text.converters.BYTE_EXCEPTION_CONVERTERS`
.. versionchanged:: kitchen 1.0.1 ; API kitchen.text 2.1.1
Deprecated :data:`~kitchen.text.converters.BYTE_EXCEPTION_CONVERTERS` as
we've simplified :func:`~kitchen.text.converters.exception_to_unicode` and
:func:`~kitchen.text.converters.exception_to_bytes` to make it unnecessary
'''
from base64 import b64encode, b64decode
import codecs
import warnings
import xml.sax.saxutils
from kitchen.text.exceptions import ControlCharError, XmlEncodeError
from kitchen.text.misc import guess_encoding, html_entities_unescape, \
isbytestring, isunicodestring, process_control_chars
#: Aliases for the utf-8 codec
_UTF8_ALIASES = frozenset(('utf-8', 'UTF-8', 'utf8', 'UTF8', 'utf_8', 'UTF_8',
'utf', 'UTF', 'u8', 'U8'))
#: Aliases for the latin-1 codec
_LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
'latin', 'LATIN', 'l1', 'L1', 'cp819', 'CP819', '8859', 'iso8859-1',
'ISO8859-1', 'iso-8859-1', 'ISO-8859-1'))
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None,
        non_string=None):
    '''Convert an object into a :class:`str` string

    :arg obj: Object to convert to a :class:`str` string.  This should
        normally be a byte :class:`bytes`
    :kwarg encoding: Encoding to try when decoding a byte :class:`bytes`.
        Defaults to :term:`utf-8`
    :kwarg errors: Error handling scheme used while decoding.  Defaults to
        ``replace`` which substitutes a marker character for undecodable
        bytes.  Any scheme known to the `codec base classes
        <http://docs.python.org/library/codecs.html#codec-base-classes>`_
        is legal, for instance ``strict`` (raise) or ``ignore`` (drop).
    :kwarg nonstring: Strategy applied to values that are not strings:

        :simplerepr: (default) Use the object's "simple representation":
            first :func:`str` is tried; if that raises a :exc:`UnicodeError`
            the object's own ``__str__`` attribute is tried as a fallback.
        :empty: Return an empty :class:`str` string
        :strict: Raise a :exc:`TypeError`
        :passthru: Return the object unchanged
        :repr: Return a :class:`str` string of the :func:`repr` of the object
    :kwarg non_string: *Deprecated* Use :attr:`nonstring` instead
    :raises TypeError: if :attr:`nonstring` is ``strict`` and a nonstring
        object is given, or if :attr:`nonstring` is set to an unknown value
    :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and
        :attr:`obj` is not decodable using the given encoding
    :returns: :class:`str` string, or the original object, depending on the
        value of :attr:`nonstring`.

    This accepts both byte :class:`bytes` and :class:`str` strings
    intelligently; nonstring objects are handled according to
    :attr:`nonstring`.  The defaults are chosen so the function always
    returns a :class:`str` string and never raises when converting a byte
    :class:`bytes` — but invalidly encoded input may then produce output you
    do not expect, so know your data rather than relying on this to paper
    over errors.

    .. versionchanged:: 0.2.1a2
        Deprecated :attr:`non_string` in favor of :attr:`nonstring`
        parameter and changed default value to ``simplerepr``
    '''
    # isinstance() checks instead of the isbasestring/isunicodestring helpers
    # keep this hot path as fast as possible
    if isinstance(obj, str):
        return obj
    if isinstance(obj, (bytes, bytearray)):
        # Normalize the two most common encodings to a canonical codec name
        if encoding in _UTF8_ALIASES:
            codec = 'utf-8'
        elif encoding in _LATIN1_ALIASES:
            codec = 'latin-1'
        else:
            codec = encoding
        return str(obj, codec, errors)

    # Not a string at all: resolve the (possibly deprecated) strategy name
    if non_string:
        warnings.warn('non_string is a deprecated parameter of'
            ' to_unicode(). Use nonstring instead', DeprecationWarning,
            stacklevel=2)
        if not nonstring:
            nonstring = non_string
    strategy = nonstring or 'simplerepr'

    if strategy == 'empty':
        return ''
    if strategy == 'passthru':
        return obj
    if strategy == 'simplerepr':
        try:
            simple = str(obj)
        except UnicodeError:
            try:
                # An instance-level __str__ attribute is not invoked by
                # str(), so give it one more chance before giving up
                simple = obj.__str__()
            except (UnicodeError, AttributeError):
                simple = ''
        if isunicodestring(simple):
            return simple
        return str(simple, encoding, errors)
    if strategy in ('repr', 'strict'):
        obj_repr = repr(obj)
        if not isunicodestring(obj_repr):
            obj_repr = str(obj_repr, encoding, errors)
        if strategy == 'repr':
            return obj_repr
        raise TypeError('to_unicode was given "%(obj)s" which is neither'
            ' a byte string (str) or a unicode string' %
            {'obj': obj_repr.encode(encoding, 'replace')})
    raise TypeError('nonstring value, %(param)s, is not set to a valid'
        ' action' % {'param': strategy})
def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None,
        non_string=None):
    '''Convert an object into a byte :class:`bytes`

    :arg obj: Object to convert to a byte :class:`bytes`.  This should normally
        be a :class:`str` string.
    :kwarg encoding: Encoding to use to convert the :class:`str` string
        into a byte :class:`bytes`.  Defaults to :term:`utf-8`.
    :kwarg errors: If errors are found while encoding, perform this action.
        Defaults to ``replace`` which replaces the invalid bytes with
        a character that means the bytes were unable to be encoded.  Other
        values are the same as the error handling schemes in the `codec base
        classes
        <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
        For instance ``strict`` which raises an exception and ``ignore`` which
        simply omits the non-encodable characters.
    :kwarg nonstring: How to treat nonstring values.  Possible values are:

        :simplerepr: Attempt to call the object's "simple representation"
            method and return that value.  We first try :func:`str`; if
            that raises a :exc:`UnicodeError` we fall back to the object's
            own ``__str__`` attribute.
        :empty: Return an empty byte :class:`bytes`
        :strict: Raise a :exc:`TypeError`
        :passthru: Return the object unchanged
        :repr: Attempt to return a byte :class:`bytes` of the :func:`repr` of
            the object

        Default is ``simplerepr``.
    :kwarg non_string: *Deprecated* Use :attr:`nonstring` instead.
    :raises TypeError: if :attr:`nonstring` is ``strict`` and
        a non-:class:`basestring` object is passed in or if :attr:`nonstring`
        is set to an unknown value.
    :raises UnicodeEncodeError: if :attr:`errors` is ``strict`` and all of the
        bytes of :attr:`obj` are unable to be encoded using :attr:`encoding`.
    :returns: byte :class:`bytes` or the original object depending on the value
        of :attr:`nonstring`.

    .. warning::

        If you pass a byte :class:`bytes` into this function the byte
        :class:`bytes` is returned unmodified.  It is **not** re-encoded with
        the specified :attr:`encoding`.  The easiest way to achieve that is::

            to_bytes(to_unicode(text), encoding='utf-8')

        The initial :func:`to_unicode` call will ensure text is
        a :class:`str` string.  Then, :func:`to_bytes` will turn that into
        a byte :class:`bytes` with the specified encoding.

    Usually, this should be used on a :class:`str` string but it can take
    either a byte :class:`bytes` or a :class:`str` string intelligently.
    Nonstring objects are handled in different ways depending on the setting
    of the :attr:`nonstring` parameter.

    The default values of this function are set so as to always return a byte
    :class:`bytes` and never raise an error when converting from unicode to
    bytes.  However, when you do not pass an encoding that can validly encode
    the object (or a non-string object), you may end up with output that you
    don't expect.  Be sure you understand the requirements of your data, not
    just ignore errors by passing it through this function.

    .. versionchanged:: 0.2.1a2
        Deprecated :attr:`non_string` in favor of :attr:`nonstring` parameter
        and changed default value to ``simplerepr``
    '''
    # Could use isbasestring, isbytestring here but we want this to be as fast
    # as possible
    if isinstance(obj, (bytes, bytearray)):
        return obj
    if isinstance(obj, str):
        return obj.encode(encoding, errors)
    if non_string:
        warnings.warn('non_string is a deprecated parameter of'
            ' to_bytes(). Use nonstring instead', DeprecationWarning,
            stacklevel=2)
        if not nonstring:
            nonstring = non_string
    if not nonstring:
        nonstring = 'simplerepr'
    if nonstring == 'empty':
        return b''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'simplerepr':
        # Guard str() the same way to_unicode() guards its simplerepr path:
        # a broken __str__ should degrade to an empty string, not propagate.
        try:
            simple = str(obj)
        except UnicodeError:
            try:
                # An instance-level __str__ attribute is not invoked by
                # str(), so give it one more chance before giving up
                simple = obj.__str__()
            except (UnicodeError, AttributeError):
                simple = ''
        if not isinstance(simple, (bytes, bytearray)):
            simple = simple.encode(encoding, 'replace')
        return simple
    elif nonstring in ('repr', 'strict'):
        try:
            obj_repr = repr(obj)
        except (AttributeError, UnicodeError):
            obj_repr = ''
        if nonstring == 'repr':
            return obj_repr.encode(encoding, errors)
        raise TypeError('to_bytes was given "%(obj)s" which is neither'
            ' a unicode string or a byte string (str)' % {'obj': obj_repr})
    raise TypeError('nonstring value, %(param)s, is not set to a valid'
        ' action' % {'param': nonstring})
def getwriter(encoding):
    '''Return a :class:`codecs.StreamWriter` that resists tracing back.

    :arg encoding: Encoding to use for transforming :class:`str` strings
        into byte :class:`bytes`.
    :rtype: :class:`codecs.StreamWriter`
    :returns: :class:`~codecs.StreamWriter` that you can instantiate to wrap
        output streams to automatically translate :class:`str` strings into
        :attr:`encoding`.

    This is a reimplementation of :func:`codecs.getwriter` that returns
    a :class:`~codecs.StreamWriter` that resists issuing tracebacks.  The
    :class:`~codecs.StreamWriter` that is returned uses
    :func:`kitchen.text.converters.to_bytes` to convert :class:`str`
    strings into byte :class:`bytes`.  The departures from
    :func:`codecs.getwriter` are:

    1) The :class:`~codecs.StreamWriter` that is returned will take byte
       :class:`bytes` as well as :class:`str` strings.  Any byte
       :class:`bytes` will be passed through unmodified.
    2) The default error handler for unknown bytes is to ``replace`` the bytes
       with the unknown character (``?`` in most ascii-based encodings, ``�``
       in the utf encodings) whereas :func:`codecs.getwriter` defaults to
       ``strict``.  Like :class:`codecs.StreamWriter`, the returned
       :class:`~codecs.StreamWriter` can have its error handler changed in
       code by setting ``stream.errors = 'new_handler_name'``

    Example usage::

        $ LC_ALL=C python
        >>> import sys
        >>> from kitchen.text.converters import getwriter
        >>> UTF8Writer = getwriter('utf-8')
        >>> unwrapped_stdout = sys.stdout
        >>> sys.stdout = UTF8Writer(unwrapped_stdout)
        >>> print('caf\\xe9')
        café
        >>> ASCIIWriter = getwriter('ascii')
        >>> sys.stdout = ASCIIWriter(unwrapped_stdout)
        >>> print('caf\\xe9')
        caf?

    .. seealso::
        API docs for :class:`codecs.StreamWriter` and :func:`codecs.getwriter`
        and `Print Fails <http://wiki.python.org/moin/PrintFails>`_ on the
        python wiki.

    .. versionadded:: kitchen 0.2a2, API: kitchen.text 1.1.0
    '''
    class _StreamWriter(codecs.StreamWriter):
        # :W0223: We don't need to implement all methods of StreamWriter.
        # This is not the actual class that gets used but a replacement for
        # the actual class.
        # :C0111: We're implementing an API from the stdlib.  Just point
        # people at that documentation instead of writing docstrings here.
        #pylint:disable-msg=W0223,C0111
        def __init__(self, stream, errors='replace'):
            codecs.StreamWriter.__init__(self, stream, errors)

        def encode(self, msg, errors='replace'):
            # Leftover debug print() calls removed here: they wrote the type
            # and repr of every message to stdout on each encode.
            return (to_bytes(msg, encoding=self.encoding, errors=errors),
                len(msg))

    # The encoding attribute is normally set by codecs.lookup(); set it
    # directly since this class is built per-encoding on the fly.
    _StreamWriter.encoding = encoding
    return _StreamWriter
def to_utf8(obj, errors='replace', non_string='passthru'):
    '''*Deprecated*

    Convert :class:`str` to an encoded :term:`utf-8` byte :class:`bytes`.
    You should be using :func:`to_bytes` instead::

        to_bytes(obj, encoding='utf-8', non_string='passthru')
    '''
    # Fixed: the suggested replacement call in the warning was missing its
    # closing parenthesis.
    warnings.warn('kitchen.text.converters.to_utf8 is deprecated. Use'
        ' kitchen.text.converters.to_bytes(obj, encoding="utf-8",'
        ' nonstring="passthru") instead.', DeprecationWarning, stacklevel=2)
    return to_bytes(obj, encoding='utf-8', errors=errors,
        nonstring=non_string)
### str is also the type name for byte strings so it's not a good name for
### something that can return unicode strings
def to_str(obj):
    '''*Deprecated*

    Get a byte :class:`bytes` "simple representation" of an object without
    risking a :exc:`UnicodeError` (the historical safe replacement for
    calling :func:`str`/:func:`unicode` directly).

    Use :func:`to_unicode` or :func:`to_bytes` explicitly instead.

    When you need :class:`str` strings::

        to_unicode(obj, nonstring='simplerepr')

    When you need byte :class:`bytes`::

        to_bytes(obj, nonstring='simplerepr')
    '''
    warnings.warn('to_str is deprecated. Use to_unicode or to_bytes'
        ' instead. See the to_str docstring for porting information.',
        DeprecationWarning, stacklevel=2)
    return to_bytes(obj, nonstring='simplerepr')
# Exception message extraction functions
EXCEPTION_CONVERTERS = (lambda e: e.args[0], lambda e: e)
''' Tuple of functions to try to use to convert an exception into a string
representation. Its main use is to extract a string (:class:`str` or
:class:`bytes`) from an exception object in :func:`exception_to_unicode` and
:func:`exception_to_bytes`. The functions here will try the exception's
``args[0]`` and the exception itself (roughly equivalent to
`str(exception)`) to extract the message. This is only a default and can
be easily overridden when calling those functions. There are several
reasons you might wish to do that. If you have exceptions where the best
string representing the exception is not returned by the default
functions, you can add another function to extract from a different
field::
from kitchen.text.converters import (EXCEPTION_CONVERTERS,
exception_to_unicode)
class MyError(Exception):
def __init__(self, message):
self.value = message
c = [lambda e: e.value]
c.extend(EXCEPTION_CONVERTERS)
try:
raise MyError('An Exception message')
except MyError, e:
print exception_to_unicode(e, converters=c)
Another reason would be if you're converting to a byte :class:`bytes` and
you know the :class:`bytes` needs to be a non-:term:`utf-8` encoding.
:func:`exception_to_bytes` defaults to :term:`utf-8` but if you convert
into a byte :class:`bytes` explicitly using a converter then you can choose
a different encoding::
from kitchen.text.converters import (EXCEPTION_CONVERTERS,
exception_to_bytes, to_bytes)
c = [lambda e: to_bytes(e.args[0], encoding='euc_jp'),
lambda e: to_bytes(e, encoding='euc_jp')]
c.extend(EXCEPTION_CONVERTERS)
try:
do_something()
except Exception, e:
log = open('logfile.euc_jp', 'a')
log.write('%s\n' % exception_to_bytes(e, converters=c)
log.close()
Each function in this list should take the exception as its sole argument
and return a string containing the message representing the exception.
The functions may return the message as a :byte class:`bytes`,
a :class:`str` string, or even an object if you trust the object to
return a decent string representation. The :func:`exception_to_unicode`
and :func:`exception_to_bytes` functions will make sure to convert the
string to the proper type before returning.
.. versionadded:: 0.2.2
'''
BYTE_EXCEPTION_CONVERTERS = (lambda e: to_bytes(e.args[0]), to_bytes)
'''*Deprecated*: Use :data:`EXCEPTION_CONVERTERS` instead.
Tuple of functions to try to use to convert an exception into a string
representation. This tuple is similar to the one in
:data:`EXCEPTION_CONVERTERS` but it's used with :func:`exception_to_bytes`
instead. Ideally, these functions should do their best to return the data
as a byte :class:`bytes` but the results will be run through
:func:`to_bytes` before being returned.
.. versionadded:: 0.2.2
.. versionchanged:: 1.0.1
Deprecated as simplifications allow :data:`EXCEPTION_CONVERTERS` to
perform the same function.
'''
def exception_to_unicode(exc, converters=EXCEPTION_CONVERTERS):
    '''Convert an exception object into a unicode representation

    :arg exc: Exception object to convert
    :kwarg converters: List of functions to use to convert the exception into
        a string.  See :data:`EXCEPTION_CONVERTERS` for the default value and
        an example of adding other converters to the defaults.  The functions
        in the list are tried one at a time to see if they can extract
        a string from the exception.  The first one to do so without raising
        an exception is used.
    :returns: :class:`str` string representation of the exception.  The
        value extracted by the :attr:`converters` will be converted into
        :class:`str` before being returned using the :term:`utf-8`
        encoding.  If you know you need to use an alternate encoding add
        a function that does that to the list of functions in
        :attr:`converters`)

    .. versionadded:: 0.2.2
    '''
    msg = '<exception failed to convert to text>'
    for func in converters:
        try:
            msg = func(exc)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.  Any other failure
            # just means this converter can't handle exc; try the next one.
            pass
        else:
            break
    return to_unicode(msg)
def exception_to_bytes(exc, converters=EXCEPTION_CONVERTERS):
    '''Convert an exception object into a str representation

    :arg exc: Exception object to convert
    :kwarg converters: List of functions to use to convert the exception into
        a string.  See :data:`EXCEPTION_CONVERTERS` for the default value and
        an example of adding other converters to the defaults.  The functions
        in the list are tried one at a time to see if they can extract
        a string from the exception.  The first one to do so without raising
        an exception is used.
    :returns: byte :class:`bytes` representation of the exception.  The value
        extracted by the :attr:`converters` will be converted into
        :class:`bytes` before being returned using the :term:`utf-8` encoding.
        If you know you need to use an alternate encoding add a function that
        does that to the list of functions in :attr:`converters`)

    .. versionadded:: 0.2.2
    .. versionchanged:: 1.0.1
        Code simplification allowed us to switch to using
        :data:`EXCEPTION_CONVERTERS` as the default value of
        :attr:`converters`.
    '''
    msg = b'<exception failed to convert to text>'
    for func in converters:
        try:
            msg = func(exc)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.  Any other failure
            # just means this converter can't handle exc; try the next one.
            pass
        else:
            break
    return to_bytes(msg)
#
# XML Related Functions
#
def unicode_to_xml(string, encoding='utf-8', attrib=False,
        control_chars='replace'):
    '''Take a :class:`str` string and turn it into a byte :class:`bytes`
    suitable for xml

    :arg string: :class:`str` string to encode into an XML compatible byte
        :class:`bytes`
    :kwarg encoding: encoding to use for the returned byte :class:`bytes`.
        Default is to encode to :term:`UTF-8`.  If some of the characters in
        :attr:`string` are not encodable in this encoding, the unknown
        characters will be entered into the output string using xml character
        references.
    :kwarg attrib: If :data:`True`, quote the string for use in an xml
        attribute.  If :data:`False` (default), quote for use in an xml text
        field.
    :kwarg control_chars: :term:`control characters` are not allowed in XML
        documents.  When we encounter those we need to know what to do.  Valid
        options are:

        :replace: (default) Replace the control characters with ``?``
        :ignore: Remove the characters altogether from the output
        :strict: Raise an :exc:`~kitchen.text.exceptions.XmlEncodeError` when
            we encounter a :term:`control character`
    :raises kitchen.text.exceptions.XmlEncodeError: If :attr:`control_chars`
        is set to ``strict`` and the string to be made suitable for output to
        xml contains :term:`control characters` or if :attr:`string` is not
        a :class:`str` string then we raise this exception.
    :raises ValueError: If :attr:`control_chars` is set to something other than
        ``replace``, ``ignore``, or ``strict``.
    :rtype: byte :class:`bytes`
    :returns: representation of the :class:`str` string as a valid XML
        byte :class:`bytes`

    XML files consist mainly of text encoded using a particular charset.  XML
    also denies the use of certain bytes in the encoded text (example: ``ASCII
    Null``).  There are also special characters that must be escaped if they
    are present in the input (example: ``<``).  This function takes care of
    all of those issues for you.

    There are a few different ways to use this function depending on your
    needs.  The simplest invocation is like this::

        unicode_to_xml(u'String with non-ASCII characters: <"á と">')

    This will return the following to you, encoded in :term:`utf-8`::

        'String with non-ASCII characters: &lt;"á と"&gt;'

    Pretty straightforward.  Now, what if you need to encode your document in
    something other than :term:`utf-8`?  For instance, ``latin-1``?  Let's
    see::

        unicode_to_xml(u'String with non-ASCII characters: <"á と">', encoding='latin-1')
        'String with non-ASCII characters: &lt;"á &#12392;"&gt;'

    Because the ``と`` character is not available in the ``latin-1`` charset,
    it is replaced with ``&#12392;`` in our output.  This is an xml character
    reference which represents the character at unicode codepoint ``12392``,
    the ``と`` character.

    When you want to reverse this, use :func:`xml_to_unicode` which will turn
    a byte :class:`bytes` into a :class:`str` string and replace the xml
    character references with the unicode characters.

    XML also has the quirk of not allowing :term:`control characters` in its
    output.  The :attr:`control_chars` parameter allows us to specify what to
    do with those.  For use cases that don't need absolute character by
    character fidelity (example: holding strings that will just be used for
    display in a GUI app later), the default value of ``replace`` works well::

        unicode_to_xml(u'String with disallowed control chars: \\u0000\\u0007')
        'String with disallowed control chars: ??'

    If you do need to be able to reproduce all of the characters at a later
    date (examples: if the string is a key value in a database or a path on a
    filesystem) you have many choices.  Here are a few that rely on ``utf-7``,
    a verbose encoding that encodes :term:`control characters` (as well as
    non-:term:`ASCII` unicode values) to characters from within the
    :term:`ASCII` printable characters.  The good thing about doing this is
    that the code is pretty simple.  You just need to use ``utf-7`` both when
    encoding the field for xml and when decoding it for use in your python
    program::

        unicode_to_xml(u'String with unicode: と and control char: \\u0007', encoding='utf7')
        'String with unicode: +MGg- and control char: +AAc-'
        # [...]
        xml_to_unicode('String with unicode: +MGg- and control char: +AAc-', encoding='utf7')
        u'String with unicode: と and control char: \\u0007'

    As you can see, the ``utf-7`` encoding will transform even characters that
    would be representable in :term:`utf-8`.  This can be a drawback if you
    want unicode characters in the file to be readable without being decoded
    first.  You can work around this with increased complexity in your
    application code::

        encoding = 'utf-8'
        u_string = u'String with unicode: と and control char: \\u0007'
        try:
            # First attempt to encode to utf8
            data = unicode_to_xml(u_string, encoding=encoding)
        except XmlEncodeError:
            # Fallback to utf-7
            encoding = 'utf-7'
            data = unicode_to_xml(u_string, encoding=encoding)
        write_tag('<mytag encoding=%s>%s</mytag>' % (encoding, data))
        # [...]
        encoding = tag.attributes.encoding
        u_string = xml_to_unicode(u_string, encoding=encoding)

    Using code similar to that, you can have some fields encoded using your
    default encoding and fallback to ``utf-7`` if there are :term:`control
    characters` present.

    .. note::

        If your goal is to preserve the :term:`control characters` you cannot
        save the entire file as ``utf-7`` and set the xml encoding parameter
        to ``utf-7``.  Because XML doesn't allow :term:`control characters`,
        you have to encode those separate from any encoding work that the XML
        parser itself knows about.

    .. seealso::

        :func:`bytes_to_xml`
            if you're dealing with bytes that are non-text or of an unknown
            encoding that you must preserve on a byte for byte level.
        :func:`guess_encoding_to_xml`
            if you're dealing with strings in unknown encodings that you don't
            need to save with char-for-char fidelity.
    '''
    if not string:
        # Small optimization: nothing to escape in an empty value
        return b''
    try:
        # Validates that string is text and applies the control_chars policy
        process_control_chars(string, strategy=control_chars)
    except TypeError:
        # Fixed: the message used to point at nonexistent
        # "bytes_string_to_xml"; the actual function is byte_string_to_xml.
        # "from None" keeps the XmlEncodeError as the sole, complete
        # explanation instead of a chained TypeError traceback.
        raise XmlEncodeError('unicode_to_xml must have a unicode type as'
                ' the first argument. Use byte_string_to_xml for byte'
                ' strings.') from None
    except ValueError:
        raise ValueError('The control_chars argument to unicode_to_xml'
                ' must be one of ignore, replace, or strict') from None
    except ControlCharError as exc:
        # Keep the original error as the cause for debugging
        raise XmlEncodeError(exc.args[0]) from exc
    # Escape characters that have special meaning in xml.  Attribute values
    # must additionally have the double quote escaped (restored here as the
    # &#34; entity; the source line had been corrupted to invalid syntax).
    if attrib:
        string = xml.sax.saxutils.escape(string, entities={'"': '&#34;'})
    else:
        string = xml.sax.saxutils.escape(string)
    # Characters unavailable in the target encoding become xml character
    # references instead of raising UnicodeEncodeError
    string = string.encode(encoding, 'xmlcharrefreplace')
    return string
def xml_to_unicode(byte_string, encoding='utf-8', errors='replace'):
    '''Transform a byte :class:`bytes` from an xml file into a :class:`str`
    string

    :arg byte_string: byte :class:`bytes` to decode
    :kwarg encoding: encoding that the byte :class:`bytes` is in
    :kwarg errors: What to do if not every character is valid in
        :attr:`encoding`.  See the :func:`to_unicode` documentation for legal
        values.
    :rtype: :class:`str` string
    :returns: string decoded from :attr:`byte_string`

    This is roughly the inverse of :func:`unicode_to_xml`: the byte
    :class:`bytes` (presumably read in from an xml file) is decoded into
    a :class:`str` string and all html entities are expanded back into
    unicode characters.  One thing it cannot do is restore any
    :term:`control characters` that were removed prior to inserting into the
    file.  If you need to keep such characters you need to use
    :func:`xml_to_bytes` and :func:`bytes_to_xml` or use one of the
    strategies documented in :func:`unicode_to_xml` instead.
    '''
    decoded = to_unicode(byte_string, encoding=encoding, errors=errors)
    return html_entities_unescape(decoded)
def byte_string_to_xml(byte_string, input_encoding='utf-8', errors='replace',
        output_encoding='utf-8', attrib=False, control_chars='replace'):
    '''Make sure a byte :class:`bytes` is validly encoded for xml output

    :arg byte_string: Byte :class:`bytes` to turn into valid xml output
    :kwarg input_encoding: Encoding of :attr:`byte_string`.  Default ``utf-8``
    :kwarg errors: How to handle errors encountered while decoding the
        :attr:`byte_string` into :class:`str` at the beginning of the
        process.  Values are:

        :replace: (default) Replace the invalid bytes with a ``?``
        :ignore: Remove the characters altogether from the output
        :strict: Raise an :exc:`UnicodeDecodeError` when we encounter
            a non-decodable character
    :kwarg output_encoding: Encoding for the xml file that this string will go
        into.  Default is ``utf-8``.  Characters of :attr:`byte_string` that
        are not encodable in this encoding are written into the output as xml
        character references.
    :kwarg attrib: If :data:`True`, quote the string for use in an xml
        attribute.  If :data:`False` (default), quote for use in an xml text
        field.
    :kwarg control_chars: What to do about :term:`control characters`, which
        XML does not allow: ``replace`` (default) substitutes ``?``,
        ``ignore`` drops them, ``strict`` raises an error.
    :raises XmlEncodeError: If :attr:`control_chars` is set to ``strict`` and
        the string contains :term:`control characters`, or if
        :attr:`byte_string` is not actually a byte :class:`bytes`.
    :raises UnicodeDecodeError: If :attr:`errors` is set to ``strict`` and
        :attr:`byte_string` contains bytes that are not decodable using
        :attr:`input_encoding`.
    :rtype: byte :class:`bytes`
    :returns: representation of the byte :class:`bytes` in the output encoding
        with any bytes that aren't available in xml taken care of.

    Use this when you have a byte :class:`bytes` representing text that you
    need to make suitable for output to xml.  For instance, to transform
    strings encoded in ``latin-1`` to :term:`utf-8` for output::

        utf8_string = byte_string_to_xml(latin1_string, input_encoding='latin-1')

    If you already have strings in the proper encoding you may still want to
    use this function to remove :term:`control characters`::

        cleaned_string = byte_string_to_xml(string, input_encoding='utf-8', output_encoding='utf-8')

    .. seealso::
        :func:`unicode_to_xml`
            for other ideas on using this function
    '''
    if not isbytestring(byte_string):
        raise XmlEncodeError('byte_string_to_xml can only take a byte'
                ' string as its first argument. Use unicode_to_xml for'
                ' unicode (str) strings')
    # Decode to text, then delegate all escaping and control-character
    # policy work to unicode_to_xml()
    return unicode_to_xml(str(byte_string, input_encoding, errors),
            encoding=output_encoding, attrib=attrib,
            control_chars=control_chars)
def xml_to_byte_string(byte_string, input_encoding='utf-8', errors='replace',
        output_encoding='utf-8'):
    '''Transform a byte :class:`bytes` from an xml file into another byte
    :class:`bytes`, re-encoded with :attr:`output_encoding`

    :arg byte_string: byte :class:`bytes` to decode
    :kwarg input_encoding: encoding that the byte :class:`bytes` is in
    :kwarg errors: What to do if not every character is valid in
        :attr:`encoding`.  See the :func:`to_unicode` docstring for legal
        values.
    :kwarg output_encoding: Encoding for the output byte :class:`bytes`
    :returns: byte :class:`bytes` re-encoded into :attr:`output_encoding`
        from the text decoded out of :attr:`byte_string` (the value is
        produced by :func:`to_bytes`, not a :class:`str` string)

    This function attempts to reverse what :func:`unicode_to_xml` does.  It
    takes a byte :class:`bytes` (presumably read in from an xml file) and
    expands all the html entities into unicode characters and encodes the
    result back into a byte :class:`bytes`.  One thing it cannot do
    is restore any :term:`control characters` that were removed prior to
    inserting into the file.  If you need to keep such characters you need to
    use :func:`xml_to_bytes` and :func:`bytes_to_xml` or use one of the
    strategies documented in :func:`unicode_to_xml` instead.
    '''
    string = xml_to_unicode(byte_string, input_encoding, errors)
    return to_bytes(string, output_encoding, errors)
def bytes_to_xml(byte_string, *args, **kwargs):
    # Raw docstring: the \* sequences below are reST escapes, not string
    # escape sequences (a plain string emits invalid-escape warnings).
    r'''Return a byte :class:`bytes` encoded so it is valid inside of any xml
    file

    :arg byte_string: byte :class:`bytes` to transform
    :arg \*args, \*\*kwargs: extra arguments to this function are passed on to
        the function actually implementing the encoding.  You can use this to
        tweak the output in some cases but, as a general rule, you shouldn't
        because the underlying encoding function is not guaranteed to remain
        the same.
    :rtype: byte :class:`bytes` consisting of all :term:`ASCII` characters
    :returns: byte :class:`bytes` representation of the input.  This will be
        encoded using base64.

    This function is made especially to put binary information into xml
    documents.

    This function is intended for encoding things that must be preserved
    byte-for-byte.  If you want to encode a byte string that's text and don't
    mind losing the actual bytes you probably want to try
    :func:`byte_string_to_xml` or :func:`guess_encoding_to_xml` instead.

    .. note::

        Although the current implementation uses :func:`base64.b64encode` and
        there's no plans to change it, that isn't guaranteed.  If you want to
        make sure that you can encode and decode these messages it's best to
        use :func:`xml_to_bytes` if you use this function to encode.
    '''
    # Can you do this yourself?  Yes, you can.
    return b64encode(byte_string, *args, **kwargs)
def xml_to_bytes(byte_string, *args, **kwargs):
    # Raw docstring: the \* sequences below are reST escapes, not string
    # escape sequences (a plain string emits invalid-escape warnings).
    r'''Decode a string encoded using :func:`bytes_to_xml`

    :arg byte_string: byte :class:`bytes` to transform.  This should be a
        base64 encoded sequence of bytes originally generated by
        :func:`bytes_to_xml`.
    :arg \*args, \*\*kwargs: extra arguments to this function are passed on to
        the function actually implementing the encoding.  You can use this to
        tweak the output in some cases but, as a general rule, you shouldn't
        because the underlying encoding function is not guaranteed to remain
        the same.
    :rtype: byte :class:`bytes`
    :returns: byte :class:`bytes` that's the decoded input

    If you've got fields in an xml document that were encoded with
    :func:`bytes_to_xml` then you want to use this function to undecode them.
    It converts a base64 encoded string into a byte :class:`bytes`.

    .. note::

        Although the current implementation uses :func:`base64.b64decode` and
        there's no plans to change it, that isn't guaranteed.  If you want to
        make sure that you can encode and decode these messages it's best to
        use :func:`bytes_to_xml` if you use this function to decode.
    '''
    return b64decode(byte_string, *args, **kwargs)
def guess_encoding_to_xml(string, output_encoding='utf-8', attrib=False,
        control_chars='replace'):
    '''Return a byte :class:`bytes` suitable for inclusion in xml

    :arg string: :class:`str` or byte :class:`bytes` to be transformed into
        a byte :class:`bytes` suitable for inclusion in xml.  If string is
        a byte :class:`bytes` we attempt to guess the encoding; if we cannot
        guess, we fall back to ``latin-1``.
    :kwarg output_encoding: Output encoding for the byte :class:`bytes`.  This
        should match the encoding of your xml file.
    :kwarg attrib: If :data:`True`, escape the item for use in an xml
        attribute.  If :data:`False` (default) escape the item for use in
        a text node.
    :returns: :term:`utf-8` encoded byte :class:`bytes`
    '''
    # Text input needs no encoding detection; hand it straight off
    if isunicodestring(string):
        return unicode_to_xml(string, encoding=output_encoding,
                attrib=attrib, control_chars=control_chars)
    # Byte input: detect its encoding, then convert through the byte path
    return byte_string_to_xml(string,
            input_encoding=guess_encoding(string), errors='replace',
            output_encoding=output_encoding, attrib=attrib,
            control_chars=control_chars)
def to_xml(string, encoding='utf-8', attrib=False, control_chars='ignore'):
    '''*Deprecated*: Use :func:`guess_encoding_to_xml` instead
    '''
    # Emit the deprecation warning at the caller's frame, then delegate to
    # the replacement API.
    message = ('kitchen.text.converters.to_xml is deprecated. Use'
            ' kitchen.text.converters.guess_encoding_to_xml instead.')
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return guess_encoding_to_xml(string, output_encoding=encoding,
            attrib=attrib, control_chars=control_chars)
# Explicit public API of this module; names not listed here are internal.
__all__ = ('BYTE_EXCEPTION_CONVERTERS', 'EXCEPTION_CONVERTERS',
        'byte_string_to_xml', 'bytes_to_xml', 'exception_to_bytes',
        'exception_to_unicode', 'getwriter', 'guess_encoding_to_xml',
        'to_bytes', 'to_str', 'to_unicode', 'to_utf8', 'to_xml',
        'unicode_to_xml', 'xml_to_byte_string', 'xml_to_bytes',
        'xml_to_unicode')
| gpl-2.0 |
w3nd1go/android_external_skia | tools/skp/webpages_playback.py | 38 | 22705 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Archives or replays webpages and creates SKPs in a Google Storage location.
To archive webpages and store SKP files (archives should be rarely updated):
cd skia
python tools/skp/webpages_playback.py --data_store=gs://rmistry --record \
--page_sets=all --skia_tools=/home/default/trunk/out/Debug/ \
--browser_executable=/tmp/chromium/out/Release/chrome
The above command uses Google Storage bucket 'rmistry' to download needed files.
To replay archived webpages and re-generate SKP files (should be run whenever
SkPicture.PICTURE_VERSION changes):
cd skia
python tools/skp/webpages_playback.py --data_store=gs://rmistry \
--page_sets=all --skia_tools=/home/default/trunk/out/Debug/ \
--browser_executable=/tmp/chromium/out/Release/chrome
Specify the --page_sets flag (default value is 'all') to pick a list of which
webpages should be archived and/or replayed. Eg:
--page_sets=tools/skp/page_sets/skia_yahooanswers_desktop.py,\
tools/skp/page_sets/skia_googlecalendar_nexus10.py
The --browser_executable flag should point to the browser binary you want to use
to capture archives and/or capture SKP files. Majority of the time it should be
a newly built chrome binary.
The --data_store flag controls where the needed artifacts, such as
credential files, are downloaded from. It also controls where the
generated artifacts, such as recorded webpages and resulting skp renderings,
are uploaded to. URLs with scheme 'gs://' use Google Storage. Otherwise
use local filesystem.
The --upload=True flag means generated artifacts will be
uploaded or copied to the location specified by --data_store. (default value is
False if not specified).
The --non-interactive flag controls whether the script will prompt the user
(default value is False if not specified).
The --skia_tools flag if specified will allow this script to run
debugger, render_pictures, and render_pdfs on the captured
SKP(s). The tools are run after all SKPs are successfully captured to make sure
they can be added to the buildbots with no breakages.
"""
import glob
import optparse
import os
import posixpath
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
sys.path.insert(0, os.getcwd())
from common.py.utils import gs_utils
from common.py.utils import shell_utils
# Directory names used both locally and in the remote data store.
ROOT_PLAYBACK_DIR_NAME = 'playback'
SKPICTURES_DIR_NAME = 'skps'
# Local archive and SKP directories.
LOCAL_PLAYBACK_ROOT_DIR = os.path.join(
    tempfile.gettempdir(), ROOT_PLAYBACK_DIR_NAME)
LOCAL_REPLAY_WEBPAGES_ARCHIVE_DIR = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), 'page_sets', 'data')
# Scratch directory for freshly captured SKPs; created once at import time.
TMP_SKP_DIR = tempfile.mkdtemp()
# Location of the credentials.json file and the string that represents missing
# passwords.
CREDENTIALS_FILE_PATH = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), 'page_sets', 'data',
    'credentials.json'
)
# Name of the SKP benchmark
SKP_BENCHMARK = 'skpicture_printer'
# The max base name length of Skp files.
MAX_SKP_BASE_NAME_LEN = 31
# Dictionary of device to platform prefixes for SKP files.
DEVICE_TO_PLATFORM_PREFIX = {
    'desktop': 'desk',
    'galaxynexus': 'mobi',
    'nexus10': 'tabl'
}
# How many times the record_wpr binary should be retried.
RETRY_RECORD_WPR_COUNT = 5
# How many times the run_benchmark binary should be retried.
RETRY_RUN_MEASUREMENT_COUNT = 5
# Location of the credentials.json file in Google Storage.
CREDENTIALS_GS_PATH = '/playback/credentials/credentials.json'
# X display used when launching the browser; defaults to ':0' if unset.
X11_DISPLAY = os.getenv('DISPLAY', ':0')
# ACLs applied to objects uploaded to Google Storage.
GS_PREDEFINED_ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
GS_FINE_GRAINED_ACL_LIST = [
  (gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN, 'google.com',
   gs_utils.GSUtils.Permission.READ),
]
# Path to Chromium's page sets.
CHROMIUM_PAGE_SETS_PATH = os.path.join('tools', 'perf', 'page_sets')
# Dictionary of supported Chromium page sets to their file prefixes.
CHROMIUM_PAGE_SETS_TO_PREFIX = {
  'key_mobile_sites_smooth.py': 'keymobi',
  'top_25_smooth.py': 'top25desk',
}
def remove_prefix(s, prefix):
  """Return s with prefix stripped from the front, or s unchanged."""
  return s[len(prefix):] if s.startswith(prefix) else s
class SkPicturePlayback(object):
"""Class that archives or replays webpages and creates SKPs."""
def __init__(self, parse_options):
"""Constructs a SkPicturePlayback BuildStep instance."""
assert parse_options.browser_executable, 'Must specify --browser_executable'
self._browser_executable = parse_options.browser_executable
self._browser_args = '--disable-setuid-sandbox'
if parse_options.browser_extra_args:
self._browser_args = '%s %s' % (
self._browser_args, parse_options.browser_extra_args)
self._chrome_page_sets_path = os.path.join(parse_options.chrome_src_path,
CHROMIUM_PAGE_SETS_PATH)
self._all_page_sets_specified = parse_options.page_sets == 'all'
self._page_sets = self._ParsePageSets(parse_options.page_sets)
self._record = parse_options.record
self._skia_tools = parse_options.skia_tools
self._non_interactive = parse_options.non_interactive
self._upload = parse_options.upload
self._skp_prefix = parse_options.skp_prefix
data_store_location = parse_options.data_store
if data_store_location.startswith(gs_utils.GS_PREFIX):
self.gs = GoogleStorageDataStore(data_store_location)
else:
self.gs = LocalFileSystemDataStore(data_store_location)
self._alternate_upload_dir = parse_options.alternate_upload_dir
self._telemetry_binaries_dir = os.path.join(parse_options.chrome_src_path,
'tools', 'perf')
self._local_skp_dir = os.path.join(
parse_options.output_dir, ROOT_PLAYBACK_DIR_NAME, SKPICTURES_DIR_NAME)
self._local_record_webpages_archive_dir = os.path.join(
parse_options.output_dir, ROOT_PLAYBACK_DIR_NAME, 'webpages_archive')
# List of SKP files generated by this script.
self._skp_files = []
def _ParsePageSets(self, page_sets):
if not page_sets:
raise ValueError('Must specify at least one page_set!')
elif self._all_page_sets_specified:
# Get everything from the page_sets directory.
page_sets_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'page_sets')
ps = [os.path.join(page_sets_dir, page_set)
for page_set in os.listdir(page_sets_dir)
if not os.path.isdir(os.path.join(page_sets_dir, page_set)) and
page_set.endswith('.py')]
chromium_ps = [
os.path.join(self._chrome_page_sets_path, cr_page_set)
for cr_page_set in CHROMIUM_PAGE_SETS_TO_PREFIX]
ps.extend(chromium_ps)
elif '*' in page_sets:
# Explode and return the glob.
ps = glob.glob(page_sets)
else:
ps = page_sets.split(',')
ps.sort()
return ps
def _IsChromiumPageSet(self, page_set):
"""Returns true if the specified page set is a Chromium page set."""
return page_set.startswith(self._chrome_page_sets_path)
def Run(self):
"""Run the SkPicturePlayback BuildStep."""
# Download the credentials file if it was not previously downloaded.
if not os.path.isfile(CREDENTIALS_FILE_PATH):
# Download the credentials.json file from Google Storage.
self.gs.download_file(CREDENTIALS_GS_PATH, CREDENTIALS_FILE_PATH)
if not os.path.isfile(CREDENTIALS_FILE_PATH):
print """\n\nCould not locate credentials file in the storage.
Please create a %s file that contains:
{
"google": {
"username": "google_testing_account_username",
"password": "google_testing_account_password"
},
"facebook": {
"username": "facebook_testing_account_username",
"password": "facebook_testing_account_password"
}
}\n\n""" % CREDENTIALS_FILE_PATH
raw_input("Please press a key when you are ready to proceed...")
# Delete any left over data files in the data directory.
for archive_file in glob.glob(
os.path.join(LOCAL_REPLAY_WEBPAGES_ARCHIVE_DIR, 'skia_*')):
os.remove(archive_file)
# Delete the local root directory if it already exists.
if os.path.exists(LOCAL_PLAYBACK_ROOT_DIR):
shutil.rmtree(LOCAL_PLAYBACK_ROOT_DIR)
# Create the required local storage directories.
self._CreateLocalStorageDirs()
# Start the timer.
start_time = time.time()
# Loop through all page_sets.
for page_set in self._page_sets:
page_set_basename = os.path.basename(page_set).split('.')[0]
page_set_json_name = page_set_basename + '.json'
wpr_data_file = page_set.split(os.path.sep)[-1].split('.')[0] + '_000.wpr'
page_set_dir = os.path.dirname(page_set)
if self._IsChromiumPageSet(page_set):
print 'Using Chromium\'s captured archives for Chromium\'s page sets.'
elif self._record:
# Create an archive of the specified webpages if '--record=True' is
# specified.
record_wpr_cmd = (
'PYTHONPATH=%s:$PYTHONPATH' % page_set_dir,
'DISPLAY=%s' % X11_DISPLAY,
os.path.join(self._telemetry_binaries_dir, 'record_wpr'),
'--extra-browser-args="%s"' % self._browser_args,
'--browser=exact',
'--browser-executable=%s' % self._browser_executable,
'%s_page_set' % page_set_basename,
'--page-set-base-dir=%s' % page_set_dir
)
for _ in range(RETRY_RECORD_WPR_COUNT):
try:
shell_utils.run(' '.join(record_wpr_cmd), shell=True)
# Move over the created archive into the local webpages archive
# directory.
shutil.move(
os.path.join(LOCAL_REPLAY_WEBPAGES_ARCHIVE_DIR, wpr_data_file),
self._local_record_webpages_archive_dir)
shutil.move(
os.path.join(LOCAL_REPLAY_WEBPAGES_ARCHIVE_DIR,
page_set_json_name),
self._local_record_webpages_archive_dir)
# Break out of the retry loop since there were no errors.
break
except Exception:
# There was a failure continue with the loop.
traceback.print_exc()
else:
# If we get here then record_wpr did not succeed and thus did not
# break out of the loop.
raise Exception('record_wpr failed for page_set: %s' % page_set)
else:
# Get the webpages archive so that it can be replayed.
self._DownloadWebpagesArchive(wpr_data_file, page_set_json_name)
run_benchmark_cmd = (
'PYTHONPATH=%s:$PYTHONPATH' % page_set_dir,
'DISPLAY=%s' % X11_DISPLAY,
'timeout', '300',
os.path.join(self._telemetry_binaries_dir, 'run_benchmark'),
'--extra-browser-args="%s"' % self._browser_args,
'--browser=exact',
'--browser-executable=%s' % self._browser_executable,
SKP_BENCHMARK,
'--page-set-name=%s' % page_set_basename,
'--page-set-base-dir=%s' % page_set_dir,
'--skp-outdir=%s' % TMP_SKP_DIR,
'--also-run-disabled-tests'
)
for _ in range(RETRY_RUN_MEASUREMENT_COUNT):
try:
print '\n\n=======Capturing SKP of %s=======\n\n' % page_set
shell_utils.run(' '.join(run_benchmark_cmd), shell=True)
except shell_utils.CommandFailedException:
# skpicture_printer sometimes fails with AssertionError but the
# captured SKP is still valid. This is a known issue.
pass
# Rename generated SKP files into more descriptive names.
try:
self._RenameSkpFiles(page_set)
# Break out of the retry loop since there were no errors.
break
except Exception:
# There was a failure continue with the loop.
traceback.print_exc()
print '\n\n=======Retrying %s=======\n\n' % page_set
time.sleep(10)
else:
# If we get here then run_benchmark did not succeed and thus did not
# break out of the loop.
raise Exception('run_benchmark failed for page_set: %s' % page_set)
print '\n\n=======Capturing SKP files took %s seconds=======\n\n' % (
time.time() - start_time)
if self._skia_tools:
render_pictures_cmd = [
os.path.join(self._skia_tools, 'render_pictures'),
'-r', self._local_skp_dir
]
render_pdfs_cmd = [
os.path.join(self._skia_tools, 'render_pdfs'),
'-r', self._local_skp_dir
]
for tools_cmd in (render_pictures_cmd, render_pdfs_cmd):
print '\n\n=======Running %s=======' % ' '.join(tools_cmd)
proc = subprocess.Popen(tools_cmd)
(code, _) = shell_utils.log_process_after_completion(proc, echo=False)
if code != 0:
raise Exception('%s failed!' % ' '.join(tools_cmd))
if not self._non_interactive:
print '\n\n=======Running debugger======='
os.system('%s %s' % (os.path.join(self._skia_tools, 'debugger'),
self._local_skp_dir))
print '\n\n'
if self._upload:
print '\n\n=======Uploading to %s=======\n\n' % self.gs.target_type()
# Copy the directory structure in the root directory into Google Storage.
dest_dir_name = ROOT_PLAYBACK_DIR_NAME
if self._alternate_upload_dir:
dest_dir_name = self._alternate_upload_dir
self.gs.upload_dir_contents(
LOCAL_PLAYBACK_ROOT_DIR, dest_dir=dest_dir_name,
upload_if=gs_utils.GSUtils.UploadIf.IF_MODIFIED,
predefined_acl=GS_PREDEFINED_ACL,
fine_grained_acl_list=GS_FINE_GRAINED_ACL_LIST)
print '\n\n=======New SKPs have been uploaded to %s =======\n\n' % (
posixpath.join(self.gs.target_name(), dest_dir_name,
SKPICTURES_DIR_NAME))
else:
print '\n\n=======Not Uploading to %s=======\n\n' % self.gs.target_type()
print 'Generated resources are available in %s\n\n' % (
LOCAL_PLAYBACK_ROOT_DIR)
return 0
def _GetSkiaSkpFileName(self, page_set):
"""Returns the SKP file name for Skia page sets."""
# /path/to/skia_yahooanswers_desktop.py -> skia_yahooanswers_desktop.py
ps_filename = os.path.basename(page_set)
# skia_yahooanswers_desktop.py -> skia_yahooanswers_desktop
ps_basename, _ = os.path.splitext(ps_filename)
# skia_yahooanswers_desktop -> skia, yahooanswers, desktop
_, page_name, device = ps_basename.split('_')
basename = '%s_%s' % (DEVICE_TO_PLATFORM_PREFIX[device], page_name)
return basename[:MAX_SKP_BASE_NAME_LEN] + '.skp'
def _GetChromiumSkpFileName(self, page_set, site):
"""Returns the SKP file name for Chromium page sets."""
# /path/to/http___mobile_news_sandbox_pt0 -> http___mobile_news_sandbox_pt0
_, webpage = os.path.split(site)
# http___mobile_news_sandbox_pt0 -> mobile_news_sandbox_pt0
for prefix in ('http___', 'https___', 'www_'):
if webpage.startswith(prefix):
webpage = webpage[len(prefix):]
# /path/to/skia_yahooanswers_desktop.py -> skia_yahooanswers_desktop.py
ps_filename = os.path.basename(page_set)
# http___mobile_news_sandbox -> pagesetprefix_http___mobile_news_sandbox
basename = '%s_%s' % (CHROMIUM_PAGE_SETS_TO_PREFIX[ps_filename], webpage)
return basename[:MAX_SKP_BASE_NAME_LEN] + '.skp'
def _RenameSkpFiles(self, page_set):
"""Rename generated SKP files into more descriptive names.
Look into the subdirectory of TMP_SKP_DIR and find the most interesting
.skp in there to be this page_set's representative .skp.
"""
subdirs = glob.glob(os.path.join(TMP_SKP_DIR, '*'))
for site in subdirs:
if self._IsChromiumPageSet(page_set):
filename = self._GetChromiumSkpFileName(page_set, site)
else:
filename = self._GetSkiaSkpFileName(page_set)
filename = filename.lower()
if self._skp_prefix:
filename = '%s%s' % (self._skp_prefix, filename)
# We choose the largest .skp as the most likely to be interesting.
largest_skp = max(glob.glob(os.path.join(site, '*.skp')),
key=lambda path: os.stat(path).st_size)
dest = os.path.join(self._local_skp_dir, filename)
print 'Moving', largest_skp, 'to', dest
shutil.move(largest_skp, dest)
self._skp_files.append(filename)
shutil.rmtree(site)
def _CreateLocalStorageDirs(self):
"""Creates required local storage directories for this script."""
for d in (self._local_record_webpages_archive_dir,
self._local_skp_dir):
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
def _DownloadWebpagesArchive(self, wpr_data_file, page_set_json_name):
"""Downloads the webpages archive and its required page set from GS."""
wpr_source = posixpath.join(ROOT_PLAYBACK_DIR_NAME, 'webpages_archive',
wpr_data_file)
page_set_source = posixpath.join(ROOT_PLAYBACK_DIR_NAME,
'webpages_archive',
page_set_json_name)
gs = self.gs
if (gs.does_storage_object_exist(wpr_source) and
gs.does_storage_object_exist(page_set_source)):
gs.download_file(wpr_source,
os.path.join(LOCAL_REPLAY_WEBPAGES_ARCHIVE_DIR,
wpr_data_file))
gs.download_file(page_set_source,
os.path.join(LOCAL_REPLAY_WEBPAGES_ARCHIVE_DIR,
page_set_json_name))
else:
raise Exception('%s and %s do not exist in %s!' % (gs.target_type(),
wpr_source, page_set_source))
class DataStore:
  """An abstract base class for uploading recordings to a data storage.

  The interface emulates the google storage api.  Every method here must
  be overridden by a concrete subclass; calling one on this base class
  raises NotImplementedError.
  """

  def target_name(self):
    """Identifier of the storage target (a URL or a directory path)."""
    raise NotImplementedError()

  def target_type(self):
    """Short description of the kind of storage backing this store."""
    raise NotImplementedError()

  def does_storage_object_exist(self, *args):
    """Whether the named object is present in the store."""
    raise NotImplementedError()

  def download_file(self, *args):
    """Fetch an object from the store to a local path."""
    raise NotImplementedError()

  def upload_dir_contents(self, source_dir, **kwargs):
    """Recursively upload the contents of source_dir into the store."""
    raise NotImplementedError()
class GoogleStorageDataStore(DataStore):
  """DataStore implementation backed by a Google Storage bucket."""

  def __init__(self, data_store_url):
    self._data_store_url = data_store_url
    # Drop leading whitespace and the 'gs://' scheme to get the bucket name.
    self._bucket = remove_prefix(data_store_url.lstrip(), gs_utils.GS_PREFIX)
    self.gs = gs_utils.GSUtils()

  def target_name(self):
    return self._data_store_url

  def target_type(self):
    return 'Google Storage'

  def does_storage_object_exist(self, *args):
    # Delegate to gs_utils, prepending the bucket this store wraps.
    return self.gs.does_storage_object_exist(self._bucket, *args)

  def download_file(self, *args):
    self.gs.download_file(self._bucket, *args)

  def upload_dir_contents(self, source_dir, **kwargs):
    self.gs.upload_dir_contents(source_dir, self._bucket, **kwargs)
class LocalFileSystemDataStore(DataStore):
  """DataStore implementation that reads and writes a local directory."""

  def __init__(self, data_store_location):
    self._base_dir = data_store_location

  def target_name(self):
    return self._base_dir

  def target_type(self):
    return self._base_dir

  def does_storage_object_exist(self, name, *args):
    return os.path.isfile(os.path.join(self._base_dir, name))

  def download_file(self, name, local_path, *args):
    shutil.copyfile(os.path.join(self._base_dir, name), local_path)

  def upload_dir_contents(self, source_dir, dest_dir, **kwargs):
    # Mirror source_dir under <base_dir>/<dest_dir>: create each directory
    # and copy every file (with metadata, via copy2).  followlinks=True so
    # directories reached through symlinks are descended into, matching the
    # isdir()-based recursion this replaces.
    target_root = os.path.join(self._base_dir, dest_dir)
    for current_dir, _, file_names in os.walk(source_dir, followlinks=True):
      relative = os.path.relpath(current_dir, source_dir)
      if relative == os.curdir:
        mirrored_dir = target_root
      else:
        mirrored_dir = os.path.join(target_root, relative)
      if not os.path.exists(mirrored_dir):
        os.makedirs(mirrored_dir)
      for file_name in file_names:
        shutil.copy2(os.path.join(current_dir, file_name),
                     os.path.join(mirrored_dir, file_name))
# Command-line entry point: declare all flags, parse them, and hand the
# resulting options object to SkPicturePlayback.
if '__main__' == __name__:
  option_parser = optparse.OptionParser()
  option_parser.add_option(
      '', '--page_sets',
      help='Specifies the page sets to use to archive. Supports globs.',
      default='all')
  option_parser.add_option(
      '', '--record', action='store_true',
      help='Specifies whether a new website archive should be created.',
      default=False)
  option_parser.add_option(
      '', '--skia_tools',
      help=('Path to compiled Skia executable tools. '
            'render_pictures/render_pdfs is run on the set '
            'after all SKPs are captured. If the script is run without '
            '--non-interactive then the debugger is also run at the end. Debug '
            'builds are recommended because they seem to catch more failures '
            'than Release builds.'),
      default=None)
  option_parser.add_option(
      '', '--upload', action='store_true',
      help=('Uploads to Google Storage or copies to local filesystem storage '
            ' if this is True.'),
      default=False)
  option_parser.add_option(
      '', '--data_store',
      help=('The location of the file storage to use to download and upload '
            'files. Can be \'gs://<bucket>\' for Google Storage, or '
            'a directory for local filesystem storage'),
      default='gs://chromium-skia-gm')
  option_parser.add_option(
      '', '--alternate_upload_dir',
      help= ('Uploads to a different directory in Google Storage or local '
             'storage if this flag is specified'),
      default=None)
  option_parser.add_option(
      '', '--output_dir',
      help=('Temporary directory where SKPs and webpage archives will be '
            'outputted to.'),
      default=tempfile.gettempdir())
  option_parser.add_option(
      '', '--browser_executable',
      help='The exact browser executable to run.',
      default=None)
  option_parser.add_option(
      '', '--browser_extra_args',
      help='Additional arguments to pass to the browser.',
      default=None)
  option_parser.add_option(
      '', '--chrome_src_path',
      help='Path to the chromium src directory.',
      default=None)
  option_parser.add_option(
      '', '--non-interactive', action='store_true',
      help='Runs the script without any prompts. If this flag is specified and '
           '--skia_tools is specified then the debugger is not run.',
      default=False)
  option_parser.add_option(
      '', '--skp_prefix',
      help='Prefix to add to the names of generated SKPs.',
      default=None)
  # Positional arguments are accepted but ignored.
  options, unused_args = option_parser.parse_args()
  playback = SkPicturePlayback(options)
  # Run() returns 0 on success; use it as the process exit code.
  sys.exit(playback.Run())
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.